Dataset schema (one record per post; observed ranges from the dataset viewer):

  slug                     string   length 15
  content                  list     1 to 129 items (rich-text tokens)
  rawContent               string   length 1 to 2k
  author                   dict
  attachments              list     0 to 49 items
  mentions                 list     0 to 49 items
  reactions                list     0 to 12 items
  publishedAt              string   length 24 (ISO 8601 timestamp)
  updatedAt                string   length 24 (ISO 8601 timestamp)
  commentators             list     0 to 52 items
  url                      string   length 25 to 46
  totalUniqueImpressions   int64    1 to 42.1k (nullable)
  numComments              int64    0 to 621
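The records that follow all use this layout. As a quick illustration of the record structure, one row could be modelled in Python as below; the dataclass, the helper, and the toy row are assumptions for illustration, not an official loader.

```python
# Minimal sketch of one record under the schema above. Field names and types come
# from the column listing; everything else here is an illustrative assumption.
from dataclasses import dataclass, field
from typing import Optional

@dataclass
class Post:
    slug: str                                     # 15-character post id
    rawContent: str                               # plain-text body (content holds the rich-text tokens)
    author: dict                                  # user profile fields (fullname, name, followerCount, ...)
    content: list = field(default_factory=list)
    attachments: list = field(default_factory=list)
    mentions: list = field(default_factory=list)
    reactions: list = field(default_factory=list)
    commentators: list = field(default_factory=list)
    publishedAt: str = ""                         # ISO 8601 timestamp
    updatedAt: str = ""
    url: str = ""
    totalUniqueImpressions: Optional[int] = None  # nullable in the source data
    numComments: int = 0

def from_row(row: dict) -> Post:
    """Build a Post from one raw row dict, tolerating missing optional fields."""
    return Post(**{k: row[k] for k in Post.__dataclass_fields__ if k in row})

row = {"slug": "136027179040023", "rawContent": "I solved ...",
       "author": {"name": "TuringsSolutions"}, "numComments": 0}
post = from_row(row)
print(post.slug, post.numComments)
```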
136027179040023
[ { "type": "text", "value": "I solved the biggest math problem associated with the Attention Mechanism. it works, better than I ever expected. Test it all yourself. Everything you need is linked from this video: ", "raw": "I solved the biggest math problem associated with the Attention Mechanism. it works, better than I ever expected. Test it all yourself. Everything you need is linked from this video: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://youtu.be/41dF0yoz0qo", "href": "https://youtu.be/41dF0yoz0qo", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Sorry the audio quality sucks, I will buy a new microphone today. Why does some moron like me solve these things and not you? I know more about how computers work than you do, that's it. Swarm algorithms were big in the 90's and early 2000's. Computers were absolute dog doo doo then in one specific way, compared to now. That one way, which everyone overlooks, is the entire secret behind why swarm algorithms are so good. ", "raw": "Sorry the audio quality sucks, I will buy a new microphone today. Why does some moron like me solve these things and not you? I know more about how computers work than you do, that's it. Swarm algorithms were big in the 90's and early 2000's. Computers were absolute dog doo doo then in one specific way, compared to now. That one way, which everyone overlooks, is the entire secret behind why swarm algorithms are so good. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
I solved the biggest math problem associated with the Attention Mechanism. it works, better than I ever expected. Test it all yourself. Everything you need is linked from this video: https://youtu.be/41dF0yoz0qo Sorry the audio quality sucks, I will buy a new microphone today. Why does some moron like me solve these things and not you? I know more about how computers work than you do, that's it. Swarm algorithms were big in the 90's and early 2000's. Computers were absolute dog doo doo then in one specific way, compared to now. That one way, which everyone overlooks, is the entire secret behind why swarm algorithms are so good.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/cA64Ix1vh75C7HoClUBhx.png", "fullname": "Richard A Aragon", "name": "TuringsSolutions", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 146, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘", "users": [ "hassenhamdi", "YaTharThShaRma999", "Joseph717171", "tousif1988", "parikhkadam", "DeathGodlike", "atlury", "nicolay-r", "kdqemre" ], "count": 9 }, { "reaction": "๐Ÿ˜Ž", "users": [ "LeroyDyer", "Joseph717171", "John6666", "DeathGodlike", "louisbrulenaudet", "nicolay-r" ], "count": 6 } ]
2024-09-29T17:20:14.000Z
2024-09-29T17:20:14.370Z
[]
/posts/TuringsSolutions/136027179040023
3,193
0
752270169127687
[ { "type": "text", "value": "Researchers have developed a novel approach called Logic-of-Thought (LoT) that significantly enhances the logical reasoning capabilities of large language models (LLMs).", "raw": "Researchers have developed a novel approach called Logic-of-Thought (LoT) that significantly enhances the logical reasoning capabilities of large language models (LLMs).", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Here are the steps on how Logic-of-Thought (LoT) is implemented:", "raw": "Here are the steps on how Logic-of-Thought (LoT) is implemented:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "-- 1. Logic Extraction", "raw": "-- 1. Logic Extraction", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1. Use Large Language Models (LLMs) to identify sentences containing conditional reasoning relationships from the input context.", "raw": "1. Use Large Language Models (LLMs) to identify sentences containing conditional reasoning relationships from the input context.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "2. Generate a collection of sentences with logical relationships.", "raw": "2. Generate a collection of sentences with logical relationships.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "3. Use LLMs to extract the set of propositional symbols and logical expressions from the collection.", "raw": "3. Use LLMs to extract the set of propositional symbols and logical expressions from the collection.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "4. Identify propositions with similar meanings and represent them using identical propositional symbols.", "raw": "4. 
Identify propositions with similar meanings and represent them using identical propositional symbols.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "5. Analyze the logical relationships between propositions based on their natural language descriptions.", "raw": "5. Analyze the logical relationships between propositions based on their natural language descriptions.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "6. Add negation (ยฌ) for propositions that express opposite meanings.", "raw": "6. Add negation (ยฌ) for propositions that express opposite meanings.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "7. Use implication (โ†’) to connect propositional symbols when a conditional relationship exists.", "raw": "7. Use implication (โ†’) to connect propositional symbols when a conditional relationship exists.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "-- 2. Logic Extension", "raw": "-- 2. Logic Extension", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1. Apply logical reasoning laws to the collection of logical expressions from the Logic Extraction phase.", "raw": "1. Apply logical reasoning laws to the collection of logical expressions from the Logic Extraction phase.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "2. Use a Python program to implement logical deduction and expand the expressions.", "raw": "2. Use a Python program to implement logical deduction and expand the expressions.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "3. 
Apply logical laws such as Double Negation, Contraposition, and Transitivity to derive new logical expressions.", "raw": "3. Apply logical laws such as Double Negation, Contraposition, and Transitivity to derive new logical expressions.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "-- 3. Logic Translation", "raw": "-- 3. Logic Translation", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1. Use LLMs to translate the newly generated logical expressions into natural language descriptions.", "raw": "1. Use LLMs to translate the newly generated logical expressions into natural language descriptions.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "2. Combine the natural language descriptions of propositional symbols according to the extended logical expressions.", "raw": "2. Combine the natural language descriptions of propositional symbols according to the extended logical expressions.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "3. Incorporate the translated logical information as a new part of the original input prompt.", "raw": "3. Incorporate the translated logical information as a new part of the original input prompt.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "-- 4. Integration with Existing Prompting Methods", "raw": "-- 4. Integration with Existing Prompting Methods", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1. Combine the LoT-generated logical information with the original prompt.", "raw": "1. 
Combine the LoT-generated logical information with the original prompt.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "2. Use this enhanced prompt with existing prompting methods like Chain-of-Thought (CoT), Self-Consistency (SC), or Tree-of-Thoughts (ToT).", "raw": "2. Use this enhanced prompt with existing prompting methods like Chain-of-Thought (CoT), Self-Consistency (SC), or Tree-of-Thoughts (ToT).", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "3. Feed the augmented prompt to the LLM to generate the final answer.", "raw": "3. Feed the augmented prompt to the LLM to generate the final answer.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "What do you think about LoT?", "raw": "What do you think about LoT?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Researchers have developed a novel approach called Logic-of-Thought (LoT) that significantly enhances the logical reasoning capabilities of large language models (LLMs). Here are the steps on how Logic-of-Thought (LoT) is implemented: -- 1. Logic Extraction 1. Use Large Language Models (LLMs) to identify sentences containing conditional reasoning relationships from the input context. 2. Generate a collection of sentences with logical relationships. 3. Use LLMs to extract the set of propositional symbols and logical expressions from the collection. 4. Identify propositions with similar meanings and represent them using identical propositional symbols. 5. Analyze the logical relationships between propositions based on their natural language descriptions. 6. Add negation (¬) for propositions that express opposite meanings. 7. Use implication (→) to connect propositional symbols when a conditional relationship exists. -- 2. Logic Extension 1. Apply logical reasoning laws to the collection of logical expressions from the Logic Extraction phase. 2. Use a Python program to implement logical deduction and expand the expressions. 3. Apply logical laws such as Double Negation, Contraposition, and Transitivity to derive new logical expressions. -- 3. Logic Translation 1. Use LLMs to translate the newly generated logical expressions into natural language descriptions. 2. Combine the natural language descriptions of propositional symbols according to the extended logical expressions. 3. Incorporate the translated logical information as a new part of the original input prompt. -- 4. Integration with Existing Prompting Methods 1. Combine the LoT-generated logical information with the original prompt. 2. Use this enhanced prompt with existing prompting methods like Chain-of-Thought (CoT), Self-Consistency (SC), or Tree-of-Thoughts (ToT). 3. Feed the augmented prompt to the LLM to generate the final answer. What do you think about LoT?
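Step 2 of the Logic Extension phase mentions using a Python program for the logical deduction. A minimal sketch of what that expansion could look like is below; the (premise, conclusion) pair representation and the helper names are illustrative assumptions, not the paper's released code.

```python
# Minimal sketch of the Logic Extension step: closing a set of implications under
# Contraposition and Transitivity, with Double Negation used for normalization.
# The pair representation and helpers are assumptions for illustration only.

def negate(p: str) -> str:
    """Toggle a leading negation on a propositional symbol, e.g. 'R' <-> '~R'."""
    return p[1:] if p.startswith("~") else "~" + p

def extend_logic(implications: set) -> set:
    """Repeatedly apply Contraposition and Transitivity until a fixed point."""
    derived = set(implications)
    while True:
        new = set()
        for a, b in derived:
            new.add((negate(b), negate(a)))          # Contraposition: A->B  =>  ~B->~A
            for b2, c in derived:
                if b2 == b:
                    new.add((a, c))                  # Transitivity: A->B, B->C  =>  A->C
        # Double Negation: ~~A is the same proposition as A
        new = {(p.replace("~~", ""), q.replace("~~", "")) for p, q in new}
        if new.issubset(derived):
            return derived
        derived |= new

# "If it rains (R), the ground gets wet (W)"; "If the ground is wet, the match is off (C)"
print(sorted(extend_logic({("R", "W"), ("W", "C")})))
# Derives, among others, ("R", "C") and the contrapositives ("~W", "~R"), ("~C", "~W")
```

The extended expressions would then be translated back into natural language (phase 3) and appended to the prompt (phase 4).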
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/662bf5bfe93bb73804ef9344/WXYLnjjJ4SROkoveIi7If.png", "fullname": "Kuldeep Singh Sidhu", "name": "singhsidhukuldeep", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 219, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/662bf5bfe93bb73804ef9344/HvmAh7UmIQY9WM3UblRzG.jpeg" } ]
[]
[ { "reaction": "๐Ÿ‘", "users": [ "Athaz01", "onlyoneplease", "awkzardxxx", "Inflammable1230", "grosa1", "securerat", "John6666", "mammour", "piyushmaharana", "louisbrulenaudet", "whitebill", "den0620", "nmitchko", "nicolay-r", "jharshraj", "atlury", "Winnougan", "StoicCodingLab" ], "count": 18 }, { "reaction": "๐Ÿ”ฅ", "users": [ "grosa1", "piyushmaharana" ], "count": 2 }, { "reaction": "๐Ÿ˜Ž", "users": [ "Winnougan" ], "count": 1 }, { "reaction": "๐Ÿคฏ", "users": [ "Winnougan" ], "count": 1 }, { "reaction": "๐Ÿง ", "users": [ "Winnougan" ], "count": 1 } ]
2024-09-29T15:07:17.000Z
2024-09-30T21:11:03.963Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64e62d11d27a8292c3637f86/aptDeBHpCJxcREj6KPLN1.jpeg", "fullname": "Nicolay Rusnachenko", "name": "nicolay-r", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 49, "isFollowing": false } ]
/posts/singhsidhukuldeep/752270169127687
3,986
1
517749756137337
[ { "type": "text", "value": "Exciting news! Introducing super-fast AI video assistant, currently in beta. With a minimum latency of under 500ms and an average latency of just 600ms.", "raw": "Exciting news! Introducing super-fast AI video assistant, currently in beta. With a minimum latency of under 500ms and an average latency of just 600ms.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "DEMO LINK:", "raw": "DEMO LINK:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/KingNish/Live-Video-Chat", "href": null, "resource": { "type": "space", "id": "KingNish/Live-Video-Chat", "discussionNum": null }, "url": "https://huggingface.co/spaces/KingNish/Live-Video-Chat", "code": null, "user": null, "label": null, "lang": null } ]
Exciting news! Introducing a super-fast AI video assistant, currently in beta, with a minimum latency of under 500 ms and an average latency of just 600 ms. DEMO LINK: https://huggingface.co/spaces/KingNish/Live-Video-Chat
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6612aedf09f16e7347dfa7e1/bPYjBXCedY_1fSIPjoBTY.jpeg", "fullname": "Nishith Jain", "name": "KingNish", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1079, "isFollowing": false }
[]
[]
[ { "reaction": "โค๏ธ", "users": [ "ijohn07", "Arivmta19", "Sansy30" ], "count": 3 }, { "reaction": "๐Ÿ‘€", "users": [ "John6666", "den0620" ], "count": 2 } ]
2024-09-29T12:16:24.000Z
2024-10-29T13:33:00.712Z
[ { "avatarUrl": "/avatars/817a1bcf34c60c44fce562233ca5dfb0.svg", "fullname": "PENDYALA HARSHA VARDHAN", "name": "HarshaSunny", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2, "isFollowing": false } ]
/posts/KingNish/517749756137337
5,977
1
334746128110541
[ { "type": "text", "value": "๐Ÿ”ฅ๐ŸŽญ๐ŸŒŸ New Research Alert - HeadGAP (Avatars Collection)! ๐ŸŒŸ๐ŸŽญ๐Ÿ”ฅ", "raw": "๐Ÿ”ฅ๐ŸŽญ๐ŸŒŸ New Research Alert - HeadGAP (Avatars Collection)! ๐ŸŒŸ๐ŸŽญ๐Ÿ”ฅ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ“„ Title: HeadGAP: Few-shot 3D Head Avatar via Generalizable Gaussian Priors ๐Ÿ”", "raw": "๐Ÿ“„ Title: HeadGAP: Few-shot 3D Head Avatar via Generalizable Gaussian Priors ๐Ÿ”", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ“ Description: HeadGAP introduces a novel method for generating high-fidelity, animatable 3D head avatars from few-shot data, using Gaussian priors and dynamic part-based modelling for personalized and generalizable results.", "raw": "๐Ÿ“ Description: HeadGAP introduces a novel method for generating high-fidelity, animatable 3D head avatars from few-shot data, using Gaussian priors and dynamic part-based modelling for personalized and generalizable results.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ‘ฅ Authors: ", "raw": "๐Ÿ‘ฅ Authors: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@zxz267", "href": null, "resource": null, "url": null, "code": null, "user": "zxz267", "label": null, "lang": null }, { "type": "text", "value": ", ", "raw": ", ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@walsvid", "href": null, "resource": null, "url": null, "code": null, "user": "walsvid", "label": null, "lang": null }, { "type": "text", "value": ", ", "raw": ", ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@zhaohu2", "href": null, "resource": null, "url": null, "code": null, "user": "zhaohu2", "label": null, "lang": null }, { "type": "text", "value": ", Weiyi Zhang, ", "raw": ", Weiyi Zhang, ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@hellozhuo", "href": null, "resource": null, "url": null, "code": null, "user": "hellozhuo", "label": null, "lang": null }, { "type": "text", "value": ", Xu Chang, Yang Zhao, Zheng Lv, Xiaoyuan Zhang, ", "raw": ", Xu Chang, Yang Zhao, Zheng Lv, Xiaoyuan Zhang, ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", 
"value": null, "raw": "@yongjie-zhang-mail", "href": null, "resource": null, "url": null, "code": null, "user": "yongjie-zhang-mail", "label": null, "lang": null }, { "type": "text", "value": ", Guidong Wang, and Lan Xu", "raw": ", Guidong Wang, and Lan Xu", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ“„ Paper: ", "raw": "๐Ÿ“„ Paper: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2408.06019", "href": null, "resource": { "type": "paper", "id": "2408.06019", "discussionNum": null }, "url": "https://huggingface.co/papers/2408.06019", "code": null, "user": null, "label": "HeadGAP: Few-shot 3D Head Avatar via Generalizable Gaussian Priors (2408.06019)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐ŸŒ Github Page: ", "raw": "๐ŸŒ Github Page: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://headgap.github.io", "href": "https://headgap.github.io", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿš€ CVPR-2023-24-Papers: ", "raw": "๐Ÿš€ CVPR-2023-24-Papers: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/DmitryRyumin/CVPR-2023-24-Papers", "href": "https://github.com/DmitryRyumin/CVPR-2023-24-Papers", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿš€ WACV-2024-Papers: ", "raw": "๐Ÿš€ WACV-2024-Papers: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/DmitryRyumin/WACV-2024-Papers", "href": "https://github.com/DmitryRyumin/WACV-2024-Papers", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": 
"new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿš€ ICCV-2023-Papers: ", "raw": "๐Ÿš€ ICCV-2023-Papers: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/DmitryRyumin/ICCV-2023-Papers", "href": "https://github.com/DmitryRyumin/ICCV-2023-Papers", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ“š More Papers: more cutting-edge research presented at other conferences in the ", "raw": "๐Ÿ“š More Papers: more cutting-edge research presented at other conferences in the ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/DmitryRyumin/NewEraAI-Papers", "href": null, "resource": { "type": "space", "id": "DmitryRyumin/NewEraAI-Papers", "discussionNum": null }, "url": "https://huggingface.co/spaces/DmitryRyumin/NewEraAI-Papers", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " curated by ", "raw": " curated by ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@DmitryRyumin", "href": null, "resource": null, "url": null, "code": null, "user": "DmitryRyumin", "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿš€ Added to the Avatars Collection: ", "raw": "๐Ÿš€ Added to the Avatars Collection: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/DmitryRyumin/avatars-65df37cdf81fec13d4dbac36", "href": null, "resource": { "type": "collection", "id": "DmitryRyumin/avatars-65df37cdf81fec13d4dbac36", "discussionNum": null }, "url": "https://huggingface.co/collections/DmitryRyumin/avatars-65df37cdf81fec13d4dbac36", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ” Keywords: #HeadGAP #3DAvatar #FewShotLearning #GaussianPriors #AvatarCreation #3DModeling #MachineLearning #ComputerVision #ComputerGraphics #GenerativeAI #DeepLearning #AI", "raw": "๐Ÿ” Keywords: #HeadGAP #3DAvatar #FewShotLearning #GaussianPriors #AvatarCreation #3DModeling #MachineLearning #ComputerVision #ComputerGraphics #GenerativeAI #DeepLearning #AI", "href": null, "resource": 
null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
🔥🎭🌟 New Research Alert - HeadGAP (Avatars Collection)! 🌟🎭🔥 📄 Title: HeadGAP: Few-shot 3D Head Avatar via Generalizable Gaussian Priors 🔍 📝 Description: HeadGAP introduces a novel method for generating high-fidelity, animatable 3D head avatars from few-shot data, using Gaussian priors and dynamic part-based modelling for personalized and generalizable results. 👥 Authors: @zxz267, @walsvid, @zhaohu2, Weiyi Zhang, @hellozhuo, Xu Chang, Yang Zhao, Zheng Lv, Xiaoyuan Zhang, @yongjie-zhang-mail, Guidong Wang, and Lan Xu 📄 Paper: https://huggingface.co/papers/2408.06019 🌐 Github Page: https://headgap.github.io 🚀 CVPR-2023-24-Papers: https://github.com/DmitryRyumin/CVPR-2023-24-Papers 🚀 WACV-2024-Papers: https://github.com/DmitryRyumin/WACV-2024-Papers 🚀 ICCV-2023-Papers: https://github.com/DmitryRyumin/ICCV-2023-Papers 📚 More Papers: more cutting-edge research presented at other conferences in the https://huggingface.co/spaces/DmitryRyumin/NewEraAI-Papers curated by @DmitryRyumin 🚀 Added to the Avatars Collection: https://huggingface.co/collections/DmitryRyumin/avatars-65df37cdf81fec13d4dbac36 🔍 Keywords: #HeadGAP #3DAvatar #FewShotLearning #GaussianPriors #AvatarCreation #3DModeling #MachineLearning #ComputerVision #ComputerGraphics #GenerativeAI #DeepLearning #AI
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/nRCxbVng_PPBqKd-Z3KVc.jpeg", "fullname": "Dmitry Ryumin", "name": "DmitryRyumin", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 377, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/UAwD2w_3pLshLn8kO9pvu.gif" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/XNyfGFL479EkGY_q9qdnd.gif" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/59p56rhLeiXhtP798MYy3.gif" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/wtqkVg5sjbgGWna9gGB9P.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/2yntdEX3eFRCldd2rZHoL.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/WP79zWJuv7A1bIAGTpBLE.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/GR88OyW2TM8I4NZbTDGjn.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/2r9zuBEvSe3BQUez5Yc6d.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/RXRO8yeFxiviYzNIAFUBH.png" } ]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/nRCxbVng_PPBqKd-Z3KVc.jpeg", "fullname": "Dmitry Ryumin", "name": "DmitryRyumin", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 377 }, { "avatarUrl": "/avatars/f0e5af3de113b998bcf84953ecd8b930.svg", "fullname": "Zhuo Su", "name": "hellozhuo", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/650d0c442a602ba349183fca/-UId-lPK64nRXjGxPJE9k.png", "fullname": "Chao Wen", "name": "walsvid", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1 }, { "avatarUrl": "/avatars/0e849359c0acac44d2513775cb6d3300.svg", "fullname": "Zhang Yongjie", "name": "yongjie-zhang-mail", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null }, { "avatarUrl": "/avatars/2835e50156b9ae5cdfdc8514e6a102d0.svg", "fullname": "zhaohu li", "name": "zhaohu2", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1 }, { "avatarUrl": "/avatars/fbc429db27443fb47769167f0eb3fd9e.svg", "fullname": "Xiaozheng Zheng", "name": "zxz267", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1 } ]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "DmitryRyumin", "zxz267", "umair894", "walsvid", "Salvor", "John6666", "zkiener", "adevbanshi" ], "count": 8 }, { "reaction": "๐Ÿค—", "users": [ "DmitryRyumin", "zxz267", "walsvid" ], "count": 3 } ]
2024-09-29T09:55:29.000Z
2024-09-29T09:55:29.890Z
[]
/posts/DmitryRyumin/334746128110541
2,525
0
601513758334151
[ { "type": "text", "value": "As AI models become more widespread, it is essential to address their potential risks and vulnerabilities. Open-source AI is poised to be a driving force behind tomorrow's innovations in this field. This paper examines the current landscape of security and safety in open-source AI models and outlines concrete measures to monitor and mitigate associated risks effectively.", "raw": "As AI models become more widespread, it is essential to address their potential risks and vulnerabilities. Open-source AI is poised to be a driving force behind tomorrow's innovations in this field. This paper examines the current landscape of security and safety in open-source AI models and outlines concrete measures to monitor and mitigate associated risks effectively.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2411.12275", "href": null, "resource": { "type": "paper", "id": "2411.12275", "discussionNum": null }, "url": "https://huggingface.co/papers/2411.12275", "code": null, "user": null, "label": "Building Trust: Foundations of Security, Safety and Transparency in AI (2411.12275)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
As AI models become more widespread, it is essential to address their potential risks and vulnerabilities. Open-source AI is poised to be a driving force behind tomorrow's innovations in this field. This paper examines the current landscape of security and safety in open-source AI models and outlines concrete measures to monitor and mitigate associated risks effectively. https://huggingface.co/papers/2411.12275
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/66c6a7667d41b231148b3135/k0-hHlIdN6pLBUP35eQoC.jpeg", "fullname": "Huzaifa Sidhpurwala", "name": "huzaifas-sidhpurwala", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "clem" ], "count": 2 } ]
2024-11-21T07:12:29.000Z
2024-11-21T07:12:29.481Z
[]
/posts/huzaifas-sidhpurwala/601513758334151
695
0
234887808967726
[ { "type": "text", "value": "It's always exciting to revisit Google's DCN paperโ€”impractical but good!", "raw": "It's always exciting to revisit Google's DCN paperโ€”impractical but good!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Deep & Cross Network (DCN) - a groundbreaking approach to click-through rate prediction that's revolutionizing digital advertising!", "raw": "Deep & Cross Network (DCN) - a groundbreaking approach to click-through rate prediction that's revolutionizing digital advertising!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Key Innovation:", "raw": "Key Innovation:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "DCN introduces a novel cross-network architecture that automatically learns feature interactions without manual engineering. What sets it apart is its ability to explicitly model bounded-degree feature crossings while maintaining the power of deep neural networks.", "raw": "DCN introduces a novel cross-network architecture that automatically learns feature interactions without manual engineering. 
What sets it apart is its ability to explicitly model bounded-degree feature crossings while maintaining the power of deep neural networks.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Technical Deep Dive:", "raw": "Technical Deep Dive:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- The architecture combines a cross network with a deep network in parallel.", "raw": "- The architecture combines a cross network with a deep network in parallel.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- The cross network performs automatic feature crossing at each layer.", "raw": "- The cross network performs automatic feature crossing at each layer.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- The embedding layer transforms sparse categorical features into dense vectors.", "raw": "- The embedding layer transforms sparse categorical features into dense vectors.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Cross layers use a unique formula that enables efficient high-degree polynomial feature interactions.", "raw": "- Cross layers use a unique formula that enables efficient high-degree polynomial feature interactions.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Memory-efficient design with linear complexity O(d) in the input dimension.", "raw": "- Memory-efficient design with linear complexity O(d) in the input dimension.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Performance Highlights:", "raw": "Performance Highlights:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { 
"type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Outperforms traditional DNN models with 60% less memory usage.", "raw": "- Outperforms traditional DNN models with 60% less memory usage.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Achieved 0.4419 logloss on the Criteo Display Ads dataset.", "raw": "- Achieved 0.4419 logloss on the Criteo Display Ads dataset.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Consistently performs better than state-of-the-art models like Deep Crossing and Factorization Machines.", "raw": "- Consistently performs better than state-of-the-art models like Deep Crossing and Factorization Machines.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Exceptional performance on non-CTR tasks like Forest Covertype (97.40% accuracy).", "raw": "- Exceptional performance on non-CTR tasks like Forest Covertype (97.40% accuracy).", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Under the Hood:", "raw": "Under the Hood:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Uses embedding vectors of dimension 6 ร— (category cardinality)^1/4.", "raw": "- Uses embedding vectors of dimension 6 ร— (category cardinality)^1/4.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Implements batch normalization and the Adam optimizer.", "raw": "- Implements batch normalization and the Adam optimizer.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- The cross network depth determines the highest polynomial degree of feature interactions.", "raw": "- The cross network depth determines the highest polynomial degree of feature interactions.", "href": null, 
"resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- An efficient projection mechanism reduces cubic computational cost to linear.", "raw": "- An efficient projection mechanism reduces cubic computational cost to linear.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Parameter sharing enables better generalization to unseen feature interactions.", "raw": "- Parameter sharing enables better generalization to unseen feature interactions.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Key Advantages:", "raw": "Key Advantages:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1. No manual feature engineering required.", "raw": "1. No manual feature engineering required.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "2. Explicit feature crossing at each layer.", "raw": "2. Explicit feature crossing at each layer.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "3. Highly memory-efficient.", "raw": "3. Highly memory-efficient.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "4. Scalable to web-scale data.", "raw": "4. Scalable to web-scale data.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "5. Robust performance across different domains.", "raw": "5. 
Robust performance across different domains.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Thoughts on how this could transform digital advertising?", "raw": "Thoughts on how this could transform digital advertising?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
It's always exciting to revisit Google's DCN paper—impractical but good! Deep & Cross Network (DCN) - a groundbreaking approach to click-through rate prediction that's revolutionizing digital advertising! Key Innovation: DCN introduces a novel cross-network architecture that automatically learns feature interactions without manual engineering. What sets it apart is its ability to explicitly model bounded-degree feature crossings while maintaining the power of deep neural networks. Technical Deep Dive: - The architecture combines a cross network with a deep network in parallel. - The cross network performs automatic feature crossing at each layer. - The embedding layer transforms sparse categorical features into dense vectors. - Cross layers use a unique formula that enables efficient high-degree polynomial feature interactions. - Memory-efficient design with linear complexity O(d) in the input dimension. Performance Highlights: - Outperforms traditional DNN models with 60% less memory usage. - Achieved 0.4419 logloss on the Criteo Display Ads dataset. - Consistently performs better than state-of-the-art models like Deep Crossing and Factorization Machines. - Exceptional performance on non-CTR tasks like Forest Covertype (97.40% accuracy). Under the Hood: - Uses embedding vectors of dimension 6 × (category cardinality)^1/4. - Implements batch normalization and the Adam optimizer. - The cross network depth determines the highest polynomial degree of feature interactions. - An efficient projection mechanism reduces cubic computational cost to linear. - Parameter sharing enables better generalization to unseen feature interactions. Key Advantages: 1. No manual feature engineering required. 2. Explicit feature crossing at each layer. 3. Highly memory-efficient. 4. Scalable to web-scale data. 5. Robust performance across different domains. Thoughts on how this could transform digital advertising?
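As a rough illustration of the cross layer and its linear O(d) cost described above, here is a minimal NumPy sketch of the DCN cross formula x_{l+1} = x_0 (x_l^T w_l) + b_l + x_l; the toy shapes, initialization, and the stacked-input assumption are for illustration only, not the paper's exact training setup.

```python
# Minimal NumPy sketch of DCN's cross layer. The rank-one interaction with x0
# collapses to one scalar per example, so cost and parameters stay linear in d.
import numpy as np

def cross_layer(x0, xl, w, b):
    """One cross layer; the residual term keeps all lower-degree crosses."""
    scale = xl @ w                     # (batch,)  projection x_l^T w_l
    return x0 * scale[:, None] + b + xl

rng = np.random.default_rng(0)
batch, d, depth = 4, 8, 3              # depth l yields feature crosses up to degree l + 1
x0 = rng.normal(size=(batch, d))       # stacked dense + embedded sparse features
w = rng.normal(size=(depth, d)) * 0.1  # one weight vector per layer: d parameters each
b = np.zeros((depth, d))

xl = x0
for l in range(depth):
    xl = cross_layer(x0, xl, w[l], b[l])
print(xl.shape)                        # (4, 8): output keeps the input dimension

card = 10_000                          # e.g. a categorical feature with 10k distinct values
print(int(6 * card ** 0.25))           # 60: the "6 x cardinality^(1/4)" embedding size quoted above
```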
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/662bf5bfe93bb73804ef9344/WXYLnjjJ4SROkoveIi7If.png", "fullname": "Kuldeep Singh Sidhu", "name": "singhsidhukuldeep", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 219, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/662bf5bfe93bb73804ef9344/3miyBTwHYC5AtSwqgM7vF.jpeg" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "Zhaojjiahui" ], "count": 2 } ]
2024-11-21T06:13:04.000Z
2024-11-22T11:03:31.230Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62d5785bc46278c4a9930328/XMuMm6-jXnGRYLqhHJN2r.jpeg", "fullname": "RedRedRed", "name": "RedSparkie", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/662bf5bfe93bb73804ef9344/WXYLnjjJ4SROkoveIi7If.png", "fullname": "Kuldeep Singh Sidhu", "name": "singhsidhukuldeep", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 219, "isFollowing": false } ]
/posts/singhsidhukuldeep/234887808967726
869
2
723309911090865
[ { "type": "text", "value": "๐ŸŽ‰ We are excited to announce our latest research on video editing - StableV2V!", "raw": "๐ŸŽ‰ We are excited to announce our latest research on video editing - StableV2V!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ’ญ StableV2V aims to perform video editing with aligned shape consistency to user prompt, even if which might cause significant shape differences.", "raw": "๐Ÿ’ญ StableV2V aims to perform video editing with aligned shape consistency to user prompt, even if which might cause significant shape differences.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ“š Besides, we curate a testing benchmark, namely DAVIS-Edit, for video editing, comprising of both text-based and image-based applications.", "raw": "๐Ÿ“š Besides, we curate a testing benchmark, namely DAVIS-Edit, for video editing, comprising of both text-based and image-based applications.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿš€ We have open-sourced our paper, code, model weights, and DAVIS-Edit, which you may refer to more details of StableV2V from the following link:", "raw": "๐Ÿš€ We have open-sourced our paper, code, model weights, and DAVIS-Edit, which you may refer to more details of StableV2V from the following link:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- arXiv paper: ", "raw": "- arXiv paper: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://arxiv.org/abs/2411.11045", "href": "https://arxiv.org/abs/2411.11045", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Project page: ", "raw": "- Project page: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://alonzoleeeooo.github.io/StableV2V/", "href": "https://alonzoleeeooo.github.io/StableV2V/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- GitHub: ", "raw": "- GitHub: 
", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/AlonzoLeeeooo/StableV2V", "href": "https://github.com/AlonzoLeeeooo/StableV2V", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- HuggingFace model repo: ", "raw": "- HuggingFace model repo: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/AlonzoLeeeooo/StableV2V", "href": null, "resource": { "type": "model", "id": "AlonzoLeeeooo/StableV2V", "discussionNum": null }, "url": "https://huggingface.co/AlonzoLeeeooo/StableV2V", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- HuggingFace dataset repo: ", "raw": "- HuggingFace dataset repo: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/AlonzoLeeeooo/DAVIS-Edit", "href": null, "resource": { "type": "dataset", "id": "AlonzoLeeeooo/DAVIS-Edit", "discussionNum": null }, "url": "https://huggingface.co/datasets/AlonzoLeeeooo/DAVIS-Edit", "code": null, "user": null, "label": null, "lang": null } ]
๐ŸŽ‰ We are excited to announce our latest research on video editing - StableV2V! ๐Ÿ’ญ StableV2V aims to perform video editing whose shape consistency stays aligned with the user prompt, even when the prompt implies significant shape differences. ๐Ÿ“š Besides, we curate a testing benchmark, namely DAVIS-Edit, for video editing, comprising both text-based and image-based applications. ๐Ÿš€ We have open-sourced our paper, code, model weights, and DAVIS-Edit; you can find more details on StableV2V at the following links: - arXiv paper: https://arxiv.org/abs/2411.11045 - Project page: https://alonzoleeeooo.github.io/StableV2V/ - GitHub: https://github.com/AlonzoLeeeooo/StableV2V - HuggingFace model repo: https://huggingface.co/AlonzoLeeeooo/StableV2V - HuggingFace dataset repo: https://huggingface.co/datasets/AlonzoLeeeooo/DAVIS-Edit
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6322cae4212b17c7728e7387/p-hJt0795EO_wlUVY39rL.jpeg", "fullname": "Chang Liu", "name": "AlonzoLeeeooo", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6322cae4212b17c7728e7387/OF6WuDtf9Oya3oZrz5x5E.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6322cae4212b17c7728e7387/ENlghu8AHemzW-bdozJf6.png" } ]
[]
[ { "reaction": "๐Ÿš€", "users": [ "AlonzoLeeeooo", "John6666", "clem", "big-daddy-alonzo" ], "count": 4 }, { "reaction": "๐Ÿ”ฅ", "users": [ "big-daddy-alonzo", "AlonzoLeeeooo" ], "count": 2 } ]
2024-11-21T04:24:53.000Z
2024-11-21T04:24:53.665Z
[]
/posts/AlonzoLeeeooo/723309911090865
850
0
818749713246386
[ { "type": "text", "value": "MOUSE-I: Transform a Prompt into a Live Web Service", "raw": "MOUSE-I: Transform a Prompt into a Live Web Service", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "\"From Prompt to Global Service in 60 Seconds\"", "raw": "\"From Prompt to Global Service in 60 Seconds\"", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The Future of Web Development", "raw": "The Future of Web Development", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "MOUSE-I revolutionizes web development by converting a single prompt into a fully functional, globally deployed web service through AI automation and enterprise-grade infrastructure.", "raw": "MOUSE-I revolutionizes web development by converting a single prompt into a fully functional, globally deployed web service through AI automation and enterprise-grade infrastructure.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โšก Lightning-Fast Pipeline (60 Seconds)", "raw": "โšก Lightning-Fast Pipeline (60 Seconds)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1. AI Prompt Enhancement (5s)", "raw": "1. 
AI Prompt Enhancement (5s)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Instant requirement analysis", "raw": "Instant requirement analysis", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Tech stack optimization", "raw": "Tech stack optimization", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Development spec generation", "raw": "Development spec generation", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "2. Code Creation (49s)", "raw": "2. Code Creation (49s)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Production-ready code", "raw": "Production-ready code", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Responsive design", "raw": "Responsive design", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Performance-optimized", "raw": "Performance-optimized", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "3. Live Rendering (1s)", "raw": "3. 
Live Rendering (1s)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Instant visualization", "raw": "Instant visualization", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Real-time testing", "raw": "Real-time testing", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "4. Global Deployment (5s)", "raw": "4. Global Deployment (5s)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Vercel infrastructure", "raw": "Vercel infrastructure", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Global CDN", "raw": "Global CDN", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Automatic HTTPS", "raw": "Automatic HTTPS", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐ŸŽฏ Key Differentiators", "raw": "๐ŸŽฏ Key Differentiators", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Instant Results: From idea to live URL in 60 seconds", "raw": "Instant Results: From idea to live URL in 60 
seconds", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Enterprise Quality: Production-grade code and infrastructure", "raw": "Enterprise Quality: Production-grade code and infrastructure", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Zero Configuration: No setup or technical knowledge required", "raw": "Zero Configuration: No setup or technical knowledge required", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "40+ Templates: Ready-to-use solutions for games, dashboards, and apps", "raw": "40+ Templates: Ready-to-use solutions for games, dashboards, and apps", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ’ซ Perfect For", "raw": "๐Ÿ’ซ Perfect For", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Startups needing quick MVPs", "raw": "Startups needing quick MVPs", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Developers prototyping ideas", "raw": "Developers prototyping ideas", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Non-technical founders building web services", "raw": "Non-technical founders building web services", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Educators creating interactive tools", "raw": "Educators creating interactive tools", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", 
"href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿš€ Get Started", "raw": "๐Ÿš€ Get Started", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Visit MOUSE-I Gallery", "raw": "Visit MOUSE-I Gallery", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Enter your prompt", "raw": "Enter your prompt", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Get your live service in 60 seconds", "raw": "Get your live service in 60 seconds", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ’ก Connect", "raw": "๐Ÿ’ก Connect", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐ŸŒ MOUSE-I Gallery", "raw": "๐ŸŒ MOUSE-I Gallery", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "code_fence", "value": null, "raw": "```\nhttps://huggingface.co/spaces/VIDraft/mouse1\n```", "href": null, "resource": null, "url": null, "code": "https://huggingface.co/spaces/VIDraft/mouse1", "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ’ฌ discord.gg/openfreeai", "raw": "๐Ÿ’ฌ discord.gg/openfreeai", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, 
"raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ“ง [email protected]", "raw": "๐Ÿ“ง [email protected]", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
MOUSE-I: Transform a Prompt into a Live Web Service "From Prompt to Global Service in 60 Seconds" The Future of Web Development MOUSE-I revolutionizes web development by converting a single prompt into a fully functional, globally deployed web service through AI automation and enterprise-grade infrastructure. โšก Lightning-Fast Pipeline (60 Seconds) 1. AI Prompt Enhancement (5s) Instant requirement analysis Tech stack optimization Development spec generation 2. Code Creation (49s) Production-ready code Responsive design Performance-optimized 3. Live Rendering (1s) Instant visualization Real-time testing 4. Global Deployment (5s) Vercel infrastructure Global CDN Automatic HTTPS ๐ŸŽฏ Key Differentiators Instant Results: From idea to live URL in 60 seconds Enterprise Quality: Production-grade code and infrastructure Zero Configuration: No setup or technical knowledge required 40+ Templates: Ready-to-use solutions for games, dashboards, and apps ๐Ÿ’ซ Perfect For Startups needing quick MVPs Developers prototyping ideas Non-technical founders building web services Educators creating interactive tools ๐Ÿš€ Get Started Visit MOUSE-I Gallery Enter your prompt Get your live service in 60 seconds ๐Ÿ’ก Connect ๐ŸŒ MOUSE-I Gallery ``` https://huggingface.co/spaces/VIDraft/mouse1 ``` ๐Ÿ’ฌ discord.gg/openfreeai ๐Ÿ“ง [email protected]
{ "avatarUrl": "/avatars/e83b4373ec080aff5f69168bc78c137e.svg", "fullname": "openfree", "name": "openfree", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 24, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/66e54edddba1e4fee4500a5a/d4_yFEVrVoZMnAfrGyqNb.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/66e54edddba1e4fee4500a5a/PW2ftHIFbuje6tenUz5S8.gif" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/66e54edddba1e4fee4500a5a/WOkTirUI2a9R2SeZxPWPn.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/66e54edddba1e4fee4500a5a/FZ_6zyxYZl0_kmpq2FIe7.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/66e54edddba1e4fee4500a5a/kEp2Jea0ep-GT_aoG61Z7.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/66e54edddba1e4fee4500a5a/7qBMhALH2KeIjH4xmTH0Y.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/66e54edddba1e4fee4500a5a/H4bh-GYWzlOmRLl3WNjDD.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/66e54edddba1e4fee4500a5a/gKn40LsiWPgCMgSi-gcMN.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/66e54edddba1e4fee4500a5a/pvKeDAbqHXZYsyhNaet7o.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/66e54edddba1e4fee4500a5a/eKQa_1BJnoy8plrgx8NKZ.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/66e54edddba1e4fee4500a5a/qKaUw0Zz9iBJnR5xdi_1C.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/66e54edddba1e4fee4500a5a/AnFewLoHrP_QVL8MT2k_J.png" } ]
[]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "openfree", "seawolf2357", "fantos", "aiqcamp", "fantaxy", "aiqtech", "pawarts001", "clem", "britny", "Fishfishfishfishfish" ], "count": 10 }, { "reaction": "๐Ÿš€", "users": [ "openfree", "seawolf2357", "fantos", "aiqcamp", "John6666", "clem" ], "count": 6 }, { "reaction": "๐Ÿ‘€", "users": [ "openfree", "seawolf2357", "fantos", "aiqcamp", "fantaxy" ], "count": 5 }, { "reaction": "โค๏ธ", "users": [ "openfree", "seawolf2357", "fantos", "aiqcamp" ], "count": 4 }, { "reaction": "๐Ÿง ", "users": [ "openfree", "seawolf2357", "fantos", "fantaxy" ], "count": 4 }, { "reaction": "๐Ÿ˜Ž", "users": [ "openfree", "seawolf2357", "fantos", "fantaxy" ], "count": 4 }, { "reaction": "โž•", "users": [ "openfree", "seawolf2357", "fantos" ], "count": 3 }, { "reaction": "๐Ÿค—", "users": [ "openfree", "seawolf2357", "fantos" ], "count": 3 }, { "reaction": "๐Ÿคฏ", "users": [ "openfree", "seawolf2357", "fantaxy" ], "count": 3 }, { "reaction": "๐Ÿค", "users": [ "openfree", "seawolf2357", "fantaxy" ], "count": 3 }, { "reaction": "๐Ÿ˜”", "users": [ "openfree", "seawolf2357" ], "count": 2 }, { "reaction": "๐Ÿ‘", "users": [ "openfree", "seawolf2357" ], "count": 2 } ]
2024-11-21T02:36:42.000Z
2024-11-21T02:48:14.993Z
[ { "avatarUrl": "/avatars/a45d25cafbb39b1147a694643d17799e.svg", "fullname": "master", "name": "fantos", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 15, "isFollowing": false } ]
/posts/openfree/818749713246386
1,249
1
521561593383165
[ { "type": "text", "value": "Want to validate some hparams or figure out what ", "raw": "Want to validate some hparams or figure out what ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "inline_code", "value": null, "raw": "`timm`", "href": null, "resource": null, "url": null, "code": "timm", "user": null, "label": null, "lang": null }, { "type": "text", "value": " model to use before commiting to download or training with a large dataset? Try mini-imagenet: ", "raw": " model to use before commiting to download or training with a large dataset? Try mini-imagenet: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/timm/mini-imagenet", "href": null, "resource": { "type": "dataset", "id": "timm/mini-imagenet", "discussionNum": null }, "url": "https://huggingface.co/datasets/timm/mini-imagenet", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I had this sitting on my drive and forgot where I pulled it together from. It's 100 classes of imagenet, 50k train and 10k val images (from ImageNet-1k train set), and 5k test images (from ImageNet-1k val set). 7.4GB instead of > 100GB for the full ImageNet-1k. This ver is not reduced resolution like some other 'mini' versions. Super easy to use with timm train/val scripts, checkout the dataset card.", "raw": "I had this sitting on my drive and forgot where I pulled it together from. It's 100 classes of imagenet, 50k train and 10k val images (from ImageNet-1k train set), and 5k test images (from ImageNet-1k val set). 7.4GB instead of > 100GB for the full ImageNet-1k. This ver is not reduced resolution like some other 'mini' versions. 
Super easy to use with timm train/val scripts, checkout the dataset card.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I often check fine-tuning with even smaller datasets like:", "raw": "I often check fine-tuning with even smaller datasets like:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " * ", "raw": " * ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/timm/resisc45", "href": null, "resource": { "type": "dataset", "id": "timm/resisc45", "discussionNum": null }, "url": "https://huggingface.co/datasets/timm/resisc45", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " * ", "raw": " * ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/timm/oxford-iiit-pet", "href": null, "resource": { "type": "dataset", "id": "timm/oxford-iiit-pet", "discussionNum": null }, "url": "https://huggingface.co/datasets/timm/oxford-iiit-pet", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "But those are a bit small to train any modest size model w/o starting from pretrained weights. ", "raw": "But those are a bit small to train any modest size model w/o starting from pretrained weights. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Want to validate some hparams or figure out what `timm` model to use before committing to download or training with a large dataset? Try mini-imagenet: https://huggingface.co/datasets/timm/mini-imagenet I had this sitting on my drive and forgot where I pulled it together from. It's 100 classes of imagenet, 50k train and 10k val images (from ImageNet-1k train set), and 5k test images (from ImageNet-1k val set). 7.4GB instead of > 100GB for the full ImageNet-1k. This version is not at a reduced resolution like some other 'mini' versions. Super easy to use with timm train/val scripts; check out the dataset card. I often check fine-tuning with even smaller datasets like: * https://huggingface.co/datasets/timm/resisc45 * https://huggingface.co/datasets/timm/oxford-iiit-pet But those are a bit small to train any modest size model w/o starting from pretrained weights.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1667002643224-604a5184dca2c7ac7508b849.jpeg", "fullname": "Ross Wightman", "name": "rwightman", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 221, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿš€", "users": [ "bryant1410", "John6666", "byoussef", "davanstrien", "clem" ], "count": 5 } ]
2024-11-20T23:40:27.000Z
2024-11-20T23:42:22.958Z
[]
/posts/rwightman/521561593383165
981
0
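The timm post above is essentially a how-to: sanity-check a model and its preprocessing against mini-imagenet before committing to a full ImageNet-1k run. A minimal sketch of that workflow in Python might look like the following; the split name ("validation"), the column name ("image"), and the choice of resnet50 are assumptions for illustration, not details taken from the post.

```python
# Minimal sketch (assumptions flagged above): check that a timm model and its
# preprocessing pipeline work against mini-imagenet before a large training run.
import torch
import timm
from datasets import load_dataset

# 100-class ImageNet subset from the post; the "validation" split name is assumed
ds = load_dataset("timm/mini-imagenet", split="validation")

model = timm.create_model("resnet50", pretrained=False, num_classes=100)
model.eval()

# Build the eval transform that matches the model's default data config
data_cfg = timm.data.resolve_data_config({}, model=model)
transform = timm.data.create_transform(**data_cfg)

sample = ds[0]  # the "image" column is assumed to hold a PIL image
x = transform(sample["image"].convert("RGB")).unsqueeze(0)

with torch.no_grad():
    logits = model(x)
print(logits.shape)  # expected: torch.Size([1, 100])
```

For actual hparam sweeps, the same dataset can be fed to timm's train/val scripts as the post suggests; the exact command-line usage is documented on the dataset card.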
234016029667356
[ { "type": "text", "value": "๐Ÿš€ DeepSeek just dropped DeepSeek-R1-Lite-Preview with โ€œreasoningโ€ capacity. ", "raw": "๐Ÿš€ DeepSeek just dropped DeepSeek-R1-Lite-Preview with โ€œreasoningโ€ capacity. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Matches OpenAI o1-preview on AIME & MATH benchmarks.", "raw": "- Matches OpenAI o1-preview on AIME & MATH benchmarks.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Transparent process output", "raw": "- Transparent process output", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Open-source model to be released", "raw": "- Open-source model to be released", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Try it out: ", "raw": "Try it out: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://chat.deepseek.com/", "href": "https://chat.deepseek.com/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
๐Ÿš€ DeepSeek just dropped DeepSeek-R1-Lite-Preview with โ€œreasoningโ€ capacity. - Matches OpenAI o1-preview on AIME & MATH benchmarks. - Transparent process output - Open-source model to be released Try it out: https://chat.deepseek.com/
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/647f36a8454af0237bd49574/jshkqBUTY-GZL8As8y6Aq.jpeg", "fullname": "Florent Daudens", "name": "fdaudens", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 384, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/647f36a8454af0237bd49574/yk1CjBQVwfy2bDrxUd6Ls.jpeg" } ]
[]
[ { "reaction": "๐Ÿ‘", "users": [ "devops724", "clem", "fgdrfgrgrdgdr", "MustaphaLargou25", "OmbelineM" ], "count": 5 }, { "reaction": "๐Ÿ‘€", "users": [ "John6666", "davanstrien", "yuchenxie", "clem" ], "count": 4 }, { "reaction": "๐Ÿคฏ", "users": [ "yuchenxie", "clem", "Syrus2" ], "count": 3 } ]
2024-11-20T22:49:41.000Z
2024-11-20T22:49:41.473Z
[]
/posts/fdaudens/234016029667356
1,334
0
873720319675748
[ { "type": "text", "value": "๐ŸŽ‰ Weโ€™re excited to announce, in collaboration with ", "raw": "๐ŸŽ‰ Weโ€™re excited to announce, in collaboration with ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@kaleidophon", "href": null, "resource": null, "url": null, "code": null, "user": "kaleidophon", "label": null, "lang": null }, { "type": "text", "value": " , the release of the models from our Apricot ๐Ÿ‘ paper, \"Apricot: Calibrating Large Language Models Using Their Generations Only,\" accepted at ACL 2024! Reproducibility is essential in science, and we've worked hard to make it as seamless as possible.", "raw": " , the release of the models from our Apricot ๐Ÿ‘ paper, \"Apricot: Calibrating Large Language Models Using Their Generations Only,\" accepted at ACL 2024! Reproducibility is essential in science, and we've worked hard to make it as seamless as possible.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/parameterlab/apricot-models-673d2cae40b6ff437a86f0bf", "href": null, "resource": { "type": "collection", "id": "parameterlab/apricot-models-673d2cae40b6ff437a86f0bf", "discussionNum": null }, "url": "https://huggingface.co/collections/parameterlab/apricot-models-673d2cae40b6ff437a86f0bf", "code": null, "user": null, "label": null, "lang": null } ]
๐ŸŽ‰ Weโ€™re excited to announce, in collaboration with @kaleidophon , the release of the models from our Apricot ๐Ÿ‘ paper, "Apricot: Calibrating Large Language Models Using Their Generations Only," accepted at ACL 2024! Reproducibility is essential in science, and we've worked hard to make it as seamless as possible. https://huggingface.co/collections/parameterlab/apricot-models-673d2cae40b6ff437a86f0bf
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1676555600618-noauth.jpeg", "fullname": "Martin Gubri", "name": "mgubri", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1, "isFollowing": false }
[]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1669982276443-noauth.jpeg", "fullname": "Dennis Ulmer", "name": "kaleidophon", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 5 } ]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "Sri-Vigneshwar-DJ", "John6666", "kaleidophon", "clem" ], "count": 4 } ]
2024-11-20T20:00:14.000Z
2024-11-20T20:00:14.591Z
[]
/posts/mgubri/873720319675748
885
0
770595143636260
[ { "type": "text", "value": "When the XetHub crew joined Hugging Face this fall, ", "raw": "When the XetHub crew joined Hugging Face this fall, ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@erinys", "href": null, "resource": null, "url": null, "code": null, "user": "erinys", "label": null, "lang": null }, { "type": "text", "value": " and I started brainstorming how to share our work to replace Git LFS on the Hub. Uploading and downloading large models and datasets takes precious time. Thatโ€™s where our chunk-based approach comes in.", "raw": " and I started brainstorming how to share our work to replace Git LFS on the Hub. Uploading and downloading large models and datasets takes precious time. Thatโ€™s where our chunk-based approach comes in.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Instead of versioning files (like Git and Git LFS), we version variable-sized chunks of data. For the Hugging Face community, this means:", "raw": "Instead of versioning files (like Git and Git LFS), we version variable-sized chunks of data. For the Hugging Face community, this means:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โฉ Only upload the chunks that changed.", "raw": "โฉ Only upload the chunks that changed.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿš€ Download just the updates, not the whole file.", "raw": "๐Ÿš€ Download just the updates, not the whole file.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿง  We store your file as deduplicated chunks", "raw": "๐Ÿง  We store your file as deduplicated chunks", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "In our benchmarks, we found that using CDC to store iterative model and dataset version led to transfer speedups of ~2x, but this isnโ€™t just a performance boost. 
Itโ€™s a rethinking of how we manage models and datasets on the Hub.", "raw": "In our benchmarks, we found that using CDC to store iterative model and dataset version led to transfer speedups of ~2x, but this isnโ€™t just a performance boost. Itโ€™s a rethinking of how we manage models and datasets on the Hub.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "We're planning on our new storage backend to the Hub in early 2025 - check out our blog to dive deeper, and let us know: how could this improve your workflows?", "raw": "We're planning on our new storage backend to the Hub in early 2025 - check out our blog to dive deeper, and let us know: how could this improve your workflows?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/blog/from-files-to-chunks", "href": "https://huggingface.co/blog/from-files-to-chunks", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
When the XetHub crew joined Hugging Face this fall, @erinys and I started brainstorming how to share our work to replace Git LFS on the Hub. Uploading and downloading large models and datasets takes precious time. Thatโ€™s where our chunk-based approach comes in. Instead of versioning files (like Git and Git LFS), we version variable-sized chunks of data. For the Hugging Face community, this means: โฉ Only upload the chunks that changed. ๐Ÿš€ Download just the updates, not the whole file. ๐Ÿง  We store your file as deduplicated chunks. In our benchmarks, we found that using CDC to store iterative model and dataset versions led to transfer speedups of ~2x, but this isnโ€™t just a performance boost. Itโ€™s a rethinking of how we manage models and datasets on the Hub. We're planning to bring our new storage backend to the Hub in early 2025 - check out our blog to dive deeper, and let us know: how could this improve your workflows? https://huggingface.co/blog/from-files-to-chunks
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65d50e9ef9cbfa798c590004/FlVe8chafigMfrPpMeJRL.jpeg", "fullname": "Jared Sulzdorf", "name": "jsulz", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 47, "isFollowing": false }
[]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/66b05ca6e7c57eac7cafbbc4/nddUkS3xu78cxCS-r7-xB.jpeg", "fullname": "Ann Huang", "name": "erinys", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 27 } ]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "assafvayner", "andrewrreed", "garrethlee", "Sri-Vigneshwar-DJ", "port8080", "victor", "shawon", "John6666", "Joseph717171", "julien-c", "davidberenstein1957", "Norod78", "johnlockejrr", "davanstrien", "erinys", "BrigitteTousi", "clem", "Dref360", "ZennyKenny" ], "count": 19 }, { "reaction": "โค๏ธ", "users": [ "assafvayner", "andrewrreed", "Joseph717171", "julien-c", "davidberenstein1957", "BrigitteTousi", "clem", "Dref360" ], "count": 8 }, { "reaction": "๐Ÿง ", "users": [ "assafvayner", "ArthurZ", "Joseph717171", "davidberenstein1957", "BrigitteTousi", "clem", "Dref360" ], "count": 7 } ]
2024-11-20T19:20:17.000Z
2024-11-20T19:20:33.190Z
[]
/posts/jsulz/770595143636260
2,792
0
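The chunk-based approach described in the post above rests on content-defined chunking (CDC): chunk boundaries are decided by the bytes themselves, so a local edit only disturbs nearby chunks and everything else deduplicates. Below is a toy, self-contained sketch of that idea; it assumes nothing about Hugging Face's or XetHub's actual implementation, and the rolling-sum hash, window size, and boundary mask are arbitrary demo choices.

```python
# Toy content-defined chunking + chunk-level dedup (illustration only).
# Requires Python 3.9+ for random.Random.randbytes.
import hashlib
import random

def cdc_chunks(data: bytes, window: int = 48, mask: int = 0xFFF):
    """Cut a chunk wherever the rolling sum of the last `window` bytes has its
    masked low bits equal to zero, so boundaries depend only on local content."""
    start, wsum = 0, 0
    for i, byte in enumerate(data):
        wsum += byte
        if i >= window:
            wsum -= data[i - window]  # keep the sum over the last `window` bytes only
        if i + 1 - start >= window and (wsum & mask) == 0:
            yield data[start:i + 1]
            start = i + 1
    if start < len(data):
        yield data[start:]

def store_versions(versions):
    """Store each unique chunk once; a file version is just a list of chunk hashes."""
    store, manifests = {}, []
    for blob in versions:
        manifest = []
        for chunk in cdc_chunks(blob):
            digest = hashlib.sha256(chunk).hexdigest()
            store.setdefault(digest, chunk)
            manifest.append(digest)
        manifests.append(manifest)
    return store, manifests

rng = random.Random(0)
v1 = rng.randbytes(200_000)                           # stand-in for a model file
v2 = v1[:1_000] + b"a small local edit" + v1[1_000:]  # new version with a tiny change

store, manifests = store_versions([v1, v2])
total_refs = sum(len(m) for m in manifests)
print(f"{total_refs} chunk references across both versions, {len(store)} unique chunks stored")
```

Because boundaries re-synchronize shortly after the edit, the second version shares almost all of its chunks with the first, which is exactly why only the changed chunks need to be uploaded.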
372927141882137
[ { "type": "text", "value": "Discussion on Emancipation on X ", "raw": "Discussion on Emancipation on X ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@https", "href": null, "resource": null, "url": null, "code": null, "user": "https", "label": null, "lang": null }, { "type": "text", "value": "://x.com/i/spaces/1vAxROEmOzkKl in 15-20mns join if you want", "raw": "://x.com/i/spaces/1vAxROEmOzkKl in 15-20mns join if you want", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Discussion on Emancipation on X @https://x.com/i/spaces/1vAxROEmOzkKl in 15-20 mins, join if you want
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6593502ca2607099284523db/AmC0JuUV3_yk74ETYh_fI.png", "fullname": "william marshall", "name": "fuzzy-mittenz", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 16, "isFollowing": false }
[]
[ { "avatarUrl": "/avatars/dee522de38405600117493c1aaa3059d.svg", "fullname": "Jeremias Weihmann", "name": "Https", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null } ]
[]
2024-11-20T18:41:54.000Z
2024-11-20T20:33:27.350Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6593502ca2607099284523db/AmC0JuUV3_yk74ETYh_fI.png", "fullname": "william marshall", "name": "fuzzy-mittenz", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 16, "isFollowing": false } ]
/posts/fuzzy-mittenz/372927141882137
263
2
108642255859843
[ { "type": "text", "value": "Google's revamped Machine Learning Crash Course covers the recent advances in AI, with an increased focus on interactive learning. ", "raw": "Google's revamped Machine Learning Crash Course covers the recent advances in AI, with an increased focus on interactive learning. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ“ 100+ exercises", "raw": "๐Ÿ“ 100+ exercises", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ—‚ 12 modules", "raw": "๐Ÿ—‚ 12 modules", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ•’ 15 hours", "raw": "๐Ÿ•’ 15 hours", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ“น Video explainers of ML concepts", "raw": "๐Ÿ“น Video explainers of ML concepts", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐ŸŒŽ Real-world examples", "raw": "๐ŸŒŽ Real-world examples", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ“Š Interactive visualizations", "raw": "๐Ÿ“Š Interactive visualizations", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Ref:", "raw": "Ref:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://developers.google.com/machine-learning/crash-course", "href": "https://developers.google.com/machine-learning/crash-course", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Google's revamped Machine Learning Crash Course covers the recent advances in AI, with an increased focus on interactive learning. ๐Ÿ“ 100+ exercises ๐Ÿ—‚ 12 modules ๐Ÿ•’ 15 hours ๐Ÿ“น Video explainers of ML concepts ๐ŸŒŽ Real-world examples ๐Ÿ“Š Interactive visualizations Ref: https://developers.google.com/machine-learning/crash-course
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64808a8c856901b0edb96245/UVa3ztQ8DRM47S8Rsk4Rz.jpeg", "fullname": "John Johnson", "name": "jjokah", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 8, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64808a8c856901b0edb96245/ejAM9a27e_l4JvbfSPeu1.png" } ]
[]
[ { "reaction": "๐Ÿ‘", "users": [ "Jayachandran1", "John6666", "jjokah", "daniel-ltw" ], "count": 4 }, { "reaction": "๐Ÿ”ฅ", "users": [ "Shahrokhpk" ], "count": 1 } ]
2024-11-20T17:43:10.000Z
2024-11-20T17:43:10.202Z
[]
/posts/jjokah/108642255859843
744
0
115769736203215
[ { "type": "mention", "value": null, "raw": "@AdinaY", "href": null, "resource": null, "url": null, "code": null, "user": "AdinaY", "label": null, "lang": null }, { "type": "text", "value": " I hope this message find you well. Can any author upload the paper without official review in HuggingFace Daily Paper? If so, how to confirm the quality of the paper.", "raw": " I hope this message find you well. Can any author upload the paper without official review in HuggingFace Daily Paper? If so, how to confirm the quality of the paper.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
@AdinaY I hope this message finds you well. Can any author upload a paper to HuggingFace Daily Papers without official review? If so, how is the quality of the paper confirmed?
{ "avatarUrl": "/avatars/e84aed6b55b0554ad9581ae3c138a16a.svg", "fullname": "AIRobotZ", "name": "AIRobotZ", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1, "isFollowing": false }
[]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63a369d98c0c89dcae3b8329/6OUJ7Hc9T1jXynYH3FGaf.png", "fullname": "Adina Yakefu", "name": "AdinaY", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 240 } ]
[ { "reaction": "๐Ÿ‘€", "users": [ "AdinaY" ], "count": 1 } ]
2024-09-29T04:46:18.000Z
2024-10-01T08:13:15.309Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63a369d98c0c89dcae3b8329/6OUJ7Hc9T1jXynYH3FGaf.png", "fullname": "Adina Yakefu", "name": "AdinaY", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 240, "isFollowing": false } ]
/posts/AIRobotZ/115769736203215
540
1
588508239551323
[ { "type": "text", "value": "700m parameters are the sweet spot for cpu usage, please let's make more of those!", "raw": "700m parameters are the sweet spot for cpu usage, please let's make more of those!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
700m parameters are the sweet spot for cpu usage, please let's make more of those!
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a813dedbb9e28866a91b27/zs-RWFuXs17IfPUhxQaei.jpeg", "fullname": "appvoid", "name": "appvoid", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 35, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/62a813dedbb9e28866a91b27/hm_k3AniNeOjxPKqdmKRU.png" } ]
[]
[ { "reaction": "๐Ÿ‘", "users": [ "datasciguy", "Josephgflowers", "Best-codes", "jgitsolutions", "Felladrin", "Tonic", "Norod78", "jordivcb", "Xmen963", "louisbrulenaudet", "nicolay-r" ], "count": 11 }, { "reaction": "๐Ÿ˜Ž", "users": [ "Best-codes", "John6666", "Tonic", "Xmen963" ], "count": 4 }, { "reaction": "โค๏ธ", "users": [ "Xmen963" ], "count": 1 } ]
2024-09-28T16:12:02.000Z
2024-09-28T20:44:32.966Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6463041c960c80d19c679845/lmww1RzZB5NzdeSRbsB36.png", "fullname": "Jason Miller", "name": "datasciguy", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a813dedbb9e28866a91b27/zs-RWFuXs17IfPUhxQaei.jpeg", "fullname": "appvoid", "name": "appvoid", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 35, "isFollowing": false } ]
/posts/appvoid/588508239551323
3,303
2
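The post above claims that ~700M parameters are a sweet spot for CPU inference, and that claim is easy to probe yourself. A rough timing sketch, using bigscience/bloom-560m purely as a stand-in near that size (an assumption; swap in any ~700M checkpoint), could look like this:

```python
# Rough CPU-only generation timing for a sub-1B causal LM (illustrative only).
import time
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "bigscience/bloom-560m"  # stand-in size; not a model named in the post
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float32)
model.to("cpu").eval()

inputs = tokenizer("The sweet spot for CPU inference is", return_tensors="pt")

start = time.perf_counter()
with torch.no_grad():
    out = model.generate(**inputs, max_new_tokens=64, do_sample=False)
elapsed = time.perf_counter() - start

new_tokens = out.shape[1] - inputs["input_ids"].shape[1]
print(f"{new_tokens / elapsed:.1f} tokens/s on CPU")
print(tokenizer.decode(out[0], skip_special_tokens=True))
```

Numbers will vary with hardware and threading settings, which is the point: a quick measurement like this is how one would check whether a given parameter count really is the sweet spot on a given CPU.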
120402239772964
[ { "type": "text", "value": "๐Ÿ“ข What if character emotion has a discrete nature? [0 ... 10]. Thanks to ", "raw": "๐Ÿ“ข What if character emotion has a discrete nature? [0 ... 10]. Thanks to ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@RicardoLee", "href": null, "resource": null, "url": null, "code": null, "user": "RicardoLee", "label": null, "lang": null }, { "type": "text", "value": " @ Newcastle University by proposing contrastive reasoning calibration (CRC) and \"Role-play\" concepts in LLM prompting to fine-tune LLaMA-3-8B ๐Ÿ”ฅ dedicated for predicting degrees of authors โœ emotions and empathy expressions in social media text passages ๐Ÿ“š With more on that, happy to share the recorded video presentation by Ricardo Lee: ", "raw": " @ Newcastle University by proposing contrastive reasoning calibration (CRC) and \"Role-play\" concepts in LLM prompting to fine-tune LLaMA-3-8B ๐Ÿ”ฅ dedicated for predicting degrees of authors โœ emotions and empathy expressions in social media text passages ๐Ÿ“š With more on that, happy to share the recorded video presentation by Ricardo Lee: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://youtu.be/Op4Py9nI8_Q", "href": "https://youtu.be/Op4Py9nI8_Q", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "model card: ", "raw": "model card: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/RicardoLee/WASSA2024_EmpathyDetection_Chinchunmei_EXP305", "href": null, "resource": { "type": "model", "id": "RicardoLee/WASSA2024_EmpathyDetection_Chinchunmei_EXP305", "discussionNum": null }, "url": "https://huggingface.co/RicardoLee/WASSA2024_EmpathyDetection_Chinchunmei_EXP305", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "space: ", "raw": "space: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/nicolay-r/emotions-extraction-665ba47a20dee2925d607a40", "href": null, "resource": { "type": "collection", "id": "nicolay-r/emotions-extraction-665ba47a20dee2925d607a40", "discussionNum": null }, "url": "https://huggingface.co/collections/nicolay-r/emotions-extraction-665ba47a20dee2925d607a40", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": 
null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Paper: ", "raw": "Paper: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://aclanthology.org/2024.wassa-1.39/", "href": "https://aclanthology.org/2024.wassa-1.39/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Poster: ", "raw": "Poster: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.dropbox.com/scl/fi/flnw0i5xcbrngk1iwliql/wassa-2024-poster-chinhumei.pdf?rlkey=rmgvbt1lhacifpu9r50seopfy&st=mkvev20h&dl=1", "href": "https://www.dropbox.com/scl/fi/flnw0i5xcbrngk1iwliql/wassa-2024-poster-chinhumei.pdf?rlkey=rmgvbt1lhacifpu9r50seopfy&st=mkvev20h&dl=1", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "X/Twitter: ", "raw": "X/Twitter: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://x.com/nicolayr_/status/1814731377012559961", "href": "https://x.com/nicolayr_/status/1814731377012559961", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
๐Ÿ“ข What if character emotion has a discrete nature? [0 ... 10]. Thanks to @RicardoLee from Newcastle University for proposing contrastive reasoning calibration (CRC) and "Role-play" concepts in LLM prompting to fine-tune LLaMA-3-8B ๐Ÿ”ฅ dedicated to predicting the degree of authors' โœ emotions and empathy expressions in social media text passages ๐Ÿ“š For more on that, I'm happy to share the recorded video presentation by Ricardo Lee: https://youtu.be/Op4Py9nI8_Q model card: https://huggingface.co/RicardoLee/WASSA2024_EmpathyDetection_Chinchunmei_EXP305 space: https://huggingface.co/collections/nicolay-r/emotions-extraction-665ba47a20dee2925d607a40 Paper: https://aclanthology.org/2024.wassa-1.39/ Poster: https://www.dropbox.com/scl/fi/flnw0i5xcbrngk1iwliql/wassa-2024-poster-chinhumei.pdf?rlkey=rmgvbt1lhacifpu9r50seopfy&st=mkvev20h&dl=1 X/Twitter: https://x.com/nicolayr_/status/1814731377012559961
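A minimal sketch of querying the released fine-tune for a discrete 0–10 score with transformers. The prompt wording below is a hypothetical placeholder, not the CRC/role-play template the model was trained with; see the model card and paper for the actual prompt format.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "RicardoLee/WASSA2024_EmpathyDetection_Chinchunmei_EXP305"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto")

essay = "I read about the flood victims and kept thinking about their families all day."
# Hypothetical prompt for illustration only; the model card documents the trained template.
prompt = (
    "You are an empathy rater. Rate the author's empathy on a discrete scale from 0 to 10.\n"
    f"Essay: {essay}\nScore:"
)

inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=8, do_sample=False)
print(tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
```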
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64e62d11d27a8292c3637f86/aptDeBHpCJxcREj6KPLN1.jpeg", "fullname": "Nicolay Rusnachenko", "name": "nicolay-r", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 49, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64e62d11d27a8292c3637f86/zAJybemT6E_yBOB4Gkos4.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64e62d11d27a8292c3637f86/Li6Cxz3-gJLdQnNm6kZUQ.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64e62d11d27a8292c3637f86/n760qaPvD7V6XgQJBEYdx.png" } ]
[ { "avatarUrl": "/avatars/4075d6e82df7f69e04d9910404719b86.svg", "fullname": "Tian Li", "name": "RicardoLee", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 14 } ]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "davidrd123", "baslak" ], "count": 3 }, { "reaction": "โค๏ธ", "users": [ "Tonic" ], "count": 1 }, { "reaction": "๐Ÿง ", "users": [ "Tonic" ], "count": 1 } ]
2024-09-28T15:30:45.000Z
2024-09-30T19:22:36.737Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6436279eaaef013d1af225c9/31yjIFpqfdvn_n9igumIU.png", "fullname": "Alignment Lab AI", "name": "Alignment-Lab-AI", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 131, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64e62d11d27a8292c3637f86/aptDeBHpCJxcREj6KPLN1.jpeg", "fullname": "Nicolay Rusnachenko", "name": "nicolay-r", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 49, "isFollowing": false }, { "avatarUrl": "/avatars/4075d6e82df7f69e04d9910404719b86.svg", "fullname": "Tian Li", "name": "RicardoLee", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 14, "isFollowing": false } ]
/posts/nicolay-r/120402239772964
1,674
3
144011952681048
[ { "type": "text", "value": "Did some little experimentation to resize pre-trained LoRAs on Flux. I explored two themes:", "raw": "Did some little experimentation to resize pre-trained LoRAs on Flux. I explored two themes:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "* Decrease the rank of a LoRA", "raw": "* Decrease the rank of a LoRA", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "* Increase the rank of a LoRA", "raw": "* Increase the rank of a LoRA", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The first one is helpful in reducing memory requirements if the LoRA is of a high rank, while the second one is merely an experiment. Another implication of this study is in the unification of LoRA ranks when you would like to ", "raw": "The first one is helpful in reducing memory requirements if the LoRA is of a high rank, while the second one is merely an experiment. Another implication of this study is in the unification of LoRA ranks when you would like to ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "inline_code", "value": null, "raw": "`torch.compile()`", "href": null, "resource": null, "url": null, "code": "torch.compile()", "user": null, "label": null, "lang": null }, { "type": "text", "value": " them. ", "raw": " them. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Check it out here: ", "raw": "Check it out here: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/sayakpaul/flux-lora-resizing", "href": null, "resource": { "type": "model", "id": "sayakpaul/flux-lora-resizing", "discussionNum": null }, "url": "https://huggingface.co/sayakpaul/flux-lora-resizing", "code": null, "user": null, "label": null, "lang": null } ]
Did a little experimentation with resizing pre-trained LoRAs on Flux. I explored two themes: * Decreasing the rank of a LoRA * Increasing the rank of a LoRA The first is helpful for reducing memory requirements if the LoRA has a high rank, while the second is merely an experiment. Another implication of this study is the unification of LoRA ranks when you would like to `torch.compile()` them. Check it out here: https://huggingface.co/sayakpaul/flux-lora-resizing
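To make "resizing the rank" concrete, here is a framework-agnostic sketch in plain PyTorch (not the repo's actual script): it approximates the LoRA update ΔW = B @ A with a truncated SVD at the new rank, and pads with zeros when the rank is increased. Tensor shapes follow the usual LoRA convention and are assumptions on my part.

```python
import torch

def resize_lora_pair(A: torch.Tensor, B: torch.Tensor, new_rank: int):
    """Return (A_new, B_new) approximating B @ A at a smaller (or larger) rank.

    A: (r, in_features), B: (out_features, r) -- standard LoRA factor shapes.
    """
    delta_w = B @ A                               # full LoRA update, (out, in)
    U, S, Vh = torch.linalg.svd(delta_w, full_matrices=False)
    k = min(new_rank, S.shape[0])                 # cannot exceed the available rank
    # Split the singular values evenly between the two factors.
    B_new = U[:, :k] * S[:k].sqrt()               # (out, k)
    A_new = S[:k].sqrt().unsqueeze(1) * Vh[:k]    # (k, in)
    if new_rank > k:                              # "increase rank": pad with zeros
        A_new = torch.cat([A_new, A_new.new_zeros(new_rank - k, A.shape[1])])
        B_new = torch.cat([B_new, B_new.new_zeros(B.shape[0], new_rank - k)], dim=1)
    return A_new, B_new
```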
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1649681653581-5f7fbd813e94f16a85448745.jpeg", "fullname": "Sayak Paul", "name": "sayakpaul", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 459, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/5f7fbd813e94f16a85448745/xR-NZN2Vf11XzIzTvFhMC.png" } ]
[]
[ { "reaction": "๐Ÿš€", "users": [ "John6666", "YaTharThShaRma999", "AtAndDev", "Joseph717171", "Svngoku" ], "count": 5 }, { "reaction": "๐Ÿ”ฅ", "users": [ "John6666", "YaTharThShaRma999", "AtAndDev", "Joseph717171" ], "count": 4 }, { "reaction": "๐Ÿ‘", "users": [ "John6666", "AtAndDev", "Joseph717171", "louisbrulenaudet" ], "count": 4 }, { "reaction": "โค๏ธ", "users": [ "John6666", "AtAndDev", "Joseph717171" ], "count": 3 } ]
2024-09-28T02:51:26.000Z
2024-09-28T11:26:59.396Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6640bbd0220cfa8cbfdce080/wiAHUu5ewawyipNs0YFBR.png", "fullname": "John Smith", "name": "John6666", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 398, "isFollowing": false } ]
/posts/sayakpaul/144011952681048
2,606
1
133665812418603
[ { "type": "text", "value": "๐Ÿ‘‹ Hi Gradio community,", "raw": "๐Ÿ‘‹ Hi Gradio community,", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I'm excited to share that Gradio 5 will launch in October with improvements across security, performance, SEO, design (see the screenshot for Gradio 4 vs. Gradio 5), and user experience, making Gradio a mature framework for web-based ML applications.", "raw": "I'm excited to share that Gradio 5 will launch in October with improvements across security, performance, SEO, design (see the screenshot for Gradio 4 vs. Gradio 5), and user experience, making Gradio a mature framework for web-based ML applications.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Gradio 5 is currently in beta, so if you'd like to try it out early, please refer to the instructions below:", "raw": "Gradio 5 is currently in beta, so if you'd like to try it out early, please refer to the instructions below:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "---------- Installation -------------", "raw": "---------- Installation -------------", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Gradio 5 depends on Python 3.10 or higher, so if you are running Gradio locally, please ensure that you have Python 3.10 or higher, or download it here: ", "raw": "Gradio 5 depends on Python 3.10 or higher, so if you are running Gradio locally, please ensure that you have Python 3.10 or higher, or download it here: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.python.org/downloads/", "href": "https://www.python.org/downloads/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": 
null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "* Locally: If you are running gradio locally, simply install the release candidate with ", "raw": "* Locally: If you are running gradio locally, simply install the release candidate with ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "inline_code", "value": null, "raw": "`pip install gradio --pre`", "href": null, "resource": null, "url": null, "code": "pip install gradio --pre", "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "* Spaces: If you would like to update an existing gradio Space to use Gradio 5, you can simply update the ", "raw": "* Spaces: If you would like to update an existing gradio Space to use Gradio 5, you can simply update the ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "inline_code", "value": null, "raw": "`sdk_version`", "href": null, "resource": null, "url": null, "code": "sdk_version", "user": null, "label": null, "lang": null }, { "type": "text", "value": " to be ", "raw": " to be ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "inline_code", "value": null, "raw": "`5.0.0b3`", "href": null, "resource": null, "url": null, "code": "5.0.0b3", "user": null, "label": null, "lang": null }, { "type": "text", "value": " in the ", "raw": " in the ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "inline_code", "value": null, "raw": "`README.md`", "href": null, "resource": null, "url": null, "code": "README.md", "user": null, "label": null, "lang": null }, { "type": "text", "value": " file on Spaces.", "raw": " file on Spaces.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "In most cases, thatโ€™s all you have to do to run Gradio 5.0. If you start your Gradio application, you should see your Gradio app running, with a fresh new UI.", "raw": "In most cases, thatโ€™s all you have to do to run Gradio 5.0. 
If you start your Gradio application, you should see your Gradio app running, with a fresh new UI.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "-----------------------------", "raw": "-----------------------------", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Fore more information, please see: ", "raw": "Fore more information, please see: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/gradio-app/gradio/issues/9463", "href": "https://github.com/gradio-app/gradio/issues/9463", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
๐Ÿ‘‹ Hi Gradio community, I'm excited to share that Gradio 5 will launch in October with improvements across security, performance, SEO, design (see the screenshot for Gradio 4 vs. Gradio 5), and user experience, making Gradio a mature framework for web-based ML applications. Gradio 5 is currently in beta, so if you'd like to try it out early, please refer to the instructions below: ---------- Installation ------------- Gradio 5 depends on Python 3.10 or higher, so if you are running Gradio locally, please ensure that you have Python 3.10 or higher, or download it here: https://www.python.org/downloads/ * Locally: If you are running gradio locally, simply install the release candidate with `pip install gradio --pre` * Spaces: If you would like to update an existing gradio Space to use Gradio 5, you can simply update the `sdk_version` to be `5.0.0b3` in the `README.md` file on Spaces. In most cases, thatโ€™s all you have to do to run Gradio 5.0. If you start your Gradio application, you should see your Gradio app running, with a fresh new UI. ----------------------------- For more information, please see: https://github.com/gradio-app/gradio/issues/9463
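As a quick smoke test after installing the beta, a minimal hello-world app (a sketch, not an official example) should come up with the refreshed UI:

```python
# Quick check that the Gradio 5 beta is installed and serving the new UI.
import gradio as gr

def greet(name: str) -> str:
    return f"Hello, {name}!"

demo = gr.Interface(fn=greet, inputs="text", outputs="text")

if __name__ == "__main__":
    print(gr.__version__)  # should print a 5.0.0bX version after `pip install gradio --pre`
    demo.launch()
```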
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1621947938344-noauth.png", "fullname": "Abubakar Abid", "name": "abidlabs", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 487, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/608b8bb39d7c9519b4adae19/hZ6QTf9oOCteTlAzJhSyp.png" } ]
[]
[ { "reaction": "โค๏ธ", "users": [ "Csplk", "DmitryRyumin", "tonyassi", "clem", "CaioXapelaum", "adamelliotfields", "Tonic", "katonsa", "brainhome", "KingNish", "Best-codes", "darkzbaron", "jyoung105", "louisbrulenaudet", "cognative", "fahim1233", "TheAwakenOne" ], "count": 17 }, { "reaction": "๐Ÿ”ฅ", "users": [ "CaioXapelaum", "Tonic", "rwightman" ], "count": 3 }, { "reaction": "๐Ÿš€", "users": [ "CaioXapelaum", "Tonic", "rwightman" ], "count": 3 }, { "reaction": "๐Ÿ‘€", "users": [ "John6666", "Tonic", "ProdeusUnity" ], "count": 3 }, { "reaction": "๐Ÿ˜Ž", "users": [ "Best-codes" ], "count": 1 } ]
2024-09-27T18:56:53.000Z
2024-09-28T03:43:21.100Z
[ { "avatarUrl": "/avatars/591252948eb38a09b9907239ceaca520.svg", "fullname": "Mishl", "name": "mishl", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1, "isFollowing": false } ]
/posts/abidlabs/133665812418603
4,219
2
231609310386239
[ { "type": "text", "value": "I'm thrilled to share that Iโ€™ve just released the Contextual Multi-Armed Bandits Library, a comprehensive Python toolkit that brings together a suite of both contextual and non-contextual bandit algorithms. Whether you're delving into reinforcement learning research or building practical applications, this library is designed to accelerate your work.", "raw": "I'm thrilled to share that Iโ€™ve just released the Contextual Multi-Armed Bandits Library, a comprehensive Python toolkit that brings together a suite of both contextual and non-contextual bandit algorithms. Whether you're delving into reinforcement learning research or building practical applications, this library is designed to accelerate your work.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "What's Inside:", "raw": "What's Inside:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Contextual Algorithms:", "raw": "- Contextual Algorithms:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- LinUCB", "raw": "- LinUCB", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Epsilon-Greedy", "raw": "- Epsilon-Greedy", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- KernelUCB", "raw": "- KernelUCB", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- NeuralLinearBandit", "raw": "- NeuralLinearBandit", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- DecisionTreeBandit", "raw": "- DecisionTreeBandit", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, 
"resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Non-Contextual Algorithms:", "raw": "- Non-Contextual Algorithms:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Upper Confidence Bound (UCB)", "raw": "- Upper Confidence Bound (UCB)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Thompson Sampling", "raw": "- Thompson Sampling", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Key Features:", "raw": "Key Features:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Modular Design: Easily integrate and customize algorithms for your specific needs.", "raw": "- Modular Design: Easily integrate and customize algorithms for your specific needs.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Comprehensive Documentation: Clear instructions and examples to get you started quickly.", "raw": "- Comprehensive Documentation: Clear instructions and examples to get you started quickly.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Educational Value: Ideal for learning and teaching concepts in reinforcement learning and decision-making under uncertainty.", "raw": "- Educational Value: Ideal for learning and teaching concepts in reinforcement learning and decision-making under uncertainty.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, 
"label": null, "lang": null }, { "type": "text", "value": "GitHub Repository: ", "raw": "GitHub Repository: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/singhsidhukuldeep/contextual-bandits", "href": "https://github.com/singhsidhukuldeep/contextual-bandits", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "PyPi: ", "raw": "PyPi: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://pypi.org/project/contextual-bandits-algos/", "href": "https://pypi.org/project/contextual-bandits-algos/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I am eager to hear your feedback, contributions, and ideas. Feel free to open issues, submit pull requests, or fork the project to make it your own.", "raw": "I am eager to hear your feedback, contributions, and ideas. Feel free to open issues, submit pull requests, or fork the project to make it your own.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
I'm thrilled to share that Iโ€™ve just released the Contextual Multi-Armed Bandits Library, a comprehensive Python toolkit that brings together a suite of both contextual and non-contextual bandit algorithms. Whether you're delving into reinforcement learning research or building practical applications, this library is designed to accelerate your work. What's Inside: - Contextual Algorithms: - LinUCB - Epsilon-Greedy - KernelUCB - NeuralLinearBandit - DecisionTreeBandit - Non-Contextual Algorithms: - Upper Confidence Bound (UCB) - Thompson Sampling Key Features: - Modular Design: Easily integrate and customize algorithms for your specific needs. - Comprehensive Documentation: Clear instructions and examples to get you started quickly. - Educational Value: Ideal for learning and teaching concepts in reinforcement learning and decision-making under uncertainty. GitHub Repository: https://github.com/singhsidhukuldeep/contextual-bandits PyPi: https://pypi.org/project/contextual-bandits-algos/ I am eager to hear your feedback, contributions, and ideas. Feel free to open issues, submit pull requests, or fork the project to make it your own.
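To make the contextual setting concrete, here is a tiny self-contained LinUCB sketch in plain NumPy. It illustrates the algorithm itself and is not the library's API; for the actual classes and signatures, see the repository's README and examples.

```python
import numpy as np

class LinUCBArm:
    """One arm of LinUCB: ridge-regression estimate plus an exploration bonus."""
    def __init__(self, dim: int, alpha: float = 1.0):
        self.alpha = alpha
        self.A = np.eye(dim)      # accumulates X^T X + I
        self.b = np.zeros(dim)    # accumulates X^T y

    def ucb(self, x: np.ndarray) -> float:
        A_inv = np.linalg.inv(self.A)
        theta = A_inv @ self.b
        return float(theta @ x + self.alpha * np.sqrt(x @ A_inv @ x))

    def update(self, x: np.ndarray, reward: float) -> None:
        self.A += np.outer(x, x)
        self.b += reward * x

# Pick the arm with the highest upper confidence bound for a given context.
arms = [LinUCBArm(dim=5) for _ in range(3)]
context = np.random.randn(5)
chosen = max(range(len(arms)), key=lambda i: arms[i].ucb(context))
arms[chosen].update(context, reward=1.0)  # reward would come from the environment
```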
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/662bf5bfe93bb73804ef9344/WXYLnjjJ4SROkoveIi7If.png", "fullname": "Kuldeep Singh Sidhu", "name": "singhsidhukuldeep", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 219, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/662bf5bfe93bb73804ef9344/ipFG31DdQjeoIiO09R0PR.jpeg" } ]
[]
[ { "reaction": "๐Ÿš€", "users": [ "nwaeneh", "John6666", "AtAndDev", "louisbrulenaudet", "nicolay-r" ], "count": 5 } ]
2024-09-27T18:16:07.000Z
2024-09-27T18:16:07.349Z
[]
/posts/singhsidhukuldeep/231609310386239
1,499
0
731609423967788
[ { "type": "text", "value": "NEW RELEASE! We've brought Shining Valiant 2 to Llama 3.2!", "raw": "NEW RELEASE! We've brought Shining Valiant 2 to Llama 3.2!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/ValiantLabs/Llama3.2-3B-ShiningValiant2", "href": null, "resource": { "type": "model", "id": "ValiantLabs/Llama3.2-3B-ShiningValiant2", "discussionNum": null }, "url": "https://huggingface.co/ValiantLabs/Llama3.2-3B-ShiningValiant2", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " is trained on high-quality general chat and science-instruct data! Get it now :)", "raw": " is trained on high-quality general chat and science-instruct data! Get it now :)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "(Enigma's up next for 3b, that'll be out soon!)", "raw": "(Enigma's up next for 3b, that'll be out soon!)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Additionally, newly expanded versions of the following datasets are now available:", "raw": "Additionally, newly expanded versions of the following datasets are now available:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/sequelbox/Supernova", "href": null, "resource": { "type": "dataset", "id": "sequelbox/Supernova", "discussionNum": null }, "url": "https://huggingface.co/datasets/sequelbox/Supernova", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " is now 178k rows of high-quality synthetic general chat data.", "raw": " is now 178k rows of high-quality synthetic general chat data.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": 
"https://huggingface.co/datasets/sequelbox/Tachibana", "href": null, "resource": { "type": "dataset", "id": "sequelbox/Tachibana", "discussionNum": null }, "url": "https://huggingface.co/datasets/sequelbox/Tachibana", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " is now 104k rows of high-quality synthetic code-instruct data.", "raw": " is now 104k rows of high-quality synthetic code-instruct data.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "for everyone to use :)", "raw": "for everyone to use :)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "more soon", "raw": "more soon", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
NEW RELEASE! We've brought Shining Valiant 2 to Llama 3.2! https://huggingface.co/ValiantLabs/Llama3.2-3B-ShiningValiant2 is trained on high-quality general chat and science-instruct data! Get it now :) (Enigma's up next for 3b, that'll be out soon!) Additionally, newly expanded versions of the following datasets are now available: https://huggingface.co/datasets/sequelbox/Supernova is now 178k rows of high-quality synthetic general chat data. https://huggingface.co/datasets/sequelbox/Tachibana is now 104k rows of high-quality synthetic code-instruct data. for everyone to use :) more soon
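A minimal sketch for trying the 3B model locally with transformers, assuming the standard Llama 3.2 chat template; check the model card for recommended generation settings.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "ValiantLabs/Llama3.2-3B-ShiningValiant2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto")

messages = [{"role": "user", "content": "Explain why the sky is blue in two sentences."}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
out = model.generate(inputs, max_new_tokens=128)
print(tokenizer.decode(out[0][inputs.shape[1]:], skip_special_tokens=True))
```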
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63444f2687964b331809eb55/WvZivsvKsM_t0tBtakovK.png", "fullname": "t.d.a.g.", "name": "sequelbox", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 51, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-09-27T18:08:26.000Z
2024-09-27T18:08:26.734Z
[]
/posts/sequelbox/731609423967788
491
0
922308939664000
[ { "type": "text", "value": "We've shipped new computer vision/multimodal tasks to Hugging Face Hub ๐Ÿซก ", "raw": "We've shipped new computer vision/multimodal tasks to Hugging Face Hub ๐Ÿซก ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Keypoint detection just landed with many docs, and goodies ๐ŸŽ", "raw": "Keypoint detection just landed with many docs, and goodies ๐ŸŽ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/models?pipeline_tag=keypoint-detection", "href": "https://huggingface.co/models?pipeline_tag=keypoint-detection", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "In Hugging Face transformers we have SuperPoint, foundation model for keypoint detection, check out the demo here ", "raw": "In Hugging Face transformers we have SuperPoint, foundation model for keypoint detection, check out the demo here ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/merve/SuperPoint", "href": null, "resource": { "type": "space", "id": "merve/SuperPoint", "discussionNum": null }, "url": "https://huggingface.co/spaces/merve/SuperPoint", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Shipped transformers task guide on keypoint detection ", "raw": "Shipped transformers task guide on keypoint detection ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/docs/transformers/tasks/keypoint_detection", "href": "https://huggingface.co/docs/transformers/tasks/keypoint_detection", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ๐Ÿ“–", "raw": " ๐Ÿ“–", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Also shipped the task page ", "raw": "Also shipped the task page ", "href": null, 
"resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/tasks/keypoint-detection", "href": "https://huggingface.co/tasks/keypoint-detection", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " (easiest way to get started!) ๐Ÿ”–", "raw": " (easiest way to get started!) ๐Ÿ”–", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
We've shipped new computer vision/multimodal tasks to Hugging Face Hub ๐Ÿซก Keypoint detection just landed with many docs and goodies ๐ŸŽ https://huggingface.co/models?pipeline_tag=keypoint-detection In Hugging Face transformers we have SuperPoint, a foundation model for keypoint detection; check out the demo here https://huggingface.co/spaces/merve/SuperPoint Shipped the transformers task guide on keypoint detection https://huggingface.co/docs/transformers/tasks/keypoint_detection ๐Ÿ“– Also shipped the task page https://huggingface.co/tasks/keypoint-detection (easiest way to get started!) ๐Ÿ”–
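For a quick start, a minimal inference sketch with transformers is below. The checkpoint name and output field names are assumptions on my part; the task guide linked above documents the exact pre/post-processing API.

```python
import torch
import requests
from PIL import Image
from transformers import AutoImageProcessor, SuperPointForKeypointDetection

ckpt = "magic-leap-community/superpoint"  # checkpoint name assumed; see the task guide
processor = AutoImageProcessor.from_pretrained(ckpt)
model = SuperPointForKeypointDetection.from_pretrained(ckpt)

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Keypoints come back with per-point confidence scores and descriptors;
# the exact output fields are described in the model docs.
print(outputs.keypoints.shape, outputs.scores.shape)
```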
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1648113222875-6141a88b3a0ec78603c9e784.png", "fullname": "Merve Noyan", "name": "merve", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 5589, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6141a88b3a0ec78603c9e784/7Jl4pv1c67exOYj3WTuHO.png" } ]
[]
[ { "reaction": "โค๏ธ", "users": [ "amyeroberts", "clem", "OzzyGT", "not-lain", "Goekdeniz-Guelmez", "KingNish", "Tonic", "baslak", "nicolay-r", "jasstionzyf" ], "count": 10 }, { "reaction": "๐Ÿ”ฅ", "users": [ "graeb", "KingNish", "Tonic", "louisbrulenaudet" ], "count": 4 }, { "reaction": "๐Ÿš€", "users": [ "Aurelien-Morgan", "John6666", "Goekdeniz-Guelmez", "Tonic" ], "count": 4 } ]
2024-09-27T12:41:16.000Z
2024-09-27T12:42:07.199Z
[]
/posts/merve/922308939664000
2,757
0
847140728902912
[ { "type": "text", "value": "This is supercool!!", "raw": "This is supercool!!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "LlaVA-3D: adds 3D-awareness to LVMs without compromising 2D understanding capabilities.", "raw": "LlaVA-3D: adds 3D-awareness to LVMs without compromising 2D understanding capabilities.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Method: they developed a unified architecture that maps 2D clip patch features to their corresponding positions in 3D space - enabling joint 2D and 3D vision-language instruction tuning.", "raw": "Method: they developed a unified architecture that maps 2D clip patch features to their corresponding positions in 3D space - enabling joint 2D and 3D vision-language instruction tuning.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Project: ", "raw": "Project: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://zcmax.github.io/projects/LLaVA-3D/", "href": "https://zcmax.github.io/projects/LLaVA-3D/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
This is supercool!! LLaVA-3D: adds 3D-awareness to LVMs without compromising 2D understanding capabilities. Method: they developed a unified architecture that maps 2D CLIP patch features to their corresponding positions in 3D space - enabling joint 2D and 3D vision-language instruction tuning. Project: https://zcmax.github.io/projects/LLaVA-3D/
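A toy sketch of the stated idea, not the authors' implementation: take CLIP patch features from a posed RGB-D frame, unproject each patch centre into 3D using depth and camera intrinsics, and attach that 3D position to the patch token. All shapes and the patch size are illustrative assumptions.

```python
import torch

def lift_patches_to_3d(patch_feats, depth, K, patch_size=14):
    """Toy sketch: attach 3D positions to 2D patch features.

    patch_feats: (H_p * W_p, C) CLIP patch features for one frame
    depth:       (H, W) depth map aligned with the RGB frame
    K:           (3, 3) camera intrinsics
    """
    H, W = depth.shape
    H_p, W_p = H // patch_size, W // patch_size
    ys, xs = torch.meshgrid(
        torch.arange(H_p) * patch_size + patch_size / 2,
        torch.arange(W_p) * patch_size + patch_size / 2,
        indexing="ij",
    )
    z = depth[ys.long(), xs.long()]              # depth at each patch centre
    x3d = (xs - K[0, 2]) / K[0, 0] * z           # back-project with intrinsics
    y3d = (ys - K[1, 2]) / K[1, 1] * z
    xyz = torch.stack([x3d, y3d, z], dim=-1).reshape(-1, 3)
    # In the real model a learned encoder would embed xyz; here we just concatenate.
    return torch.cat([patch_feats, xyz], dim=-1)  # (H_p * W_p, C + 3)
```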
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6438a9027de34e8ea7e4b257/vib8QSd1AWMr_bR9ig_xJ.jpeg", "fullname": "Jaward Sesay", "name": "Jaward", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 191, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/cvh32xP8ksE0sWWLbbEe6.jpeg" }, { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/XFVkrRH74xhBxIxXooFCh.mp4" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/vb2l-zQ_5vpAOp7dVlx1S.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/i_e8008zYXdpy5z1o_hLM.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/tcxIMUwyZtPZEkGa7FohZ.jpeg" } ]
[]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "Bchotov", "John6666", "kaykyramos", "davidberenstein1957", "paulmaksimovich", "clem", "AtAndDev", "DiamanteAmarelo" ], "count": 8 }, { "reaction": "๐Ÿš€", "users": [ "DiamanteAmarelo", "nicolay-r" ], "count": 2 } ]
2024-09-27T06:24:32.000Z
2024-09-27T06:24:32.233Z
[]
/posts/Jaward/847140728902912
1,944
0
300554611911292
[ { "type": "text", "value": "๐Ÿš€ 1,000,000 public models milestone achieved on Hugging Face! ๐Ÿคฏ", "raw": "๐Ÿš€ 1,000,000 public models milestone achieved on Hugging Face! ๐Ÿคฏ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This chart by ", "raw": "This chart by ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@cfahlgren1", "href": null, "resource": null, "url": null, "code": null, "user": "cfahlgren1", "label": null, "lang": null }, { "type": "text", "value": " shows the explosive growth of open-source AI. It's not just about numbers - it's a thriving community combining cutting-edge ML with real-world applications. ", "raw": " shows the explosive growth of open-source AI. It's not just about numbers - it's a thriving community combining cutting-edge ML with real-world applications. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/cfahlgren1/hub-stats", "href": null, "resource": { "type": "space", "id": "cfahlgren1/hub-stats", "discussionNum": null }, "url": "https://huggingface.co/spaces/cfahlgren1/hub-stats", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Can't wait to see what's next! ", "raw": "Can't wait to see what's next! ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
๐Ÿš€ 1,000,000 public models milestone achieved on Hugging Face! ๐Ÿคฏ This chart by @cfahlgren1 shows the explosive growth of open-source AI. It's not just about numbers - it's a thriving community combining cutting-edge ML with real-world applications. https://huggingface.co/spaces/cfahlgren1/hub-stats Can't wait to see what's next!
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/647f36a8454af0237bd49574/jshkqBUTY-GZL8As8y6Aq.jpeg", "fullname": "Florent Daudens", "name": "fdaudens", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 384, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/647f36a8454af0237bd49574/JIDS-ymF0xu2c20fH9Ers.png" } ]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/648a374f00f7a3374ee64b99/YPwSOrronoozwHbJchPn3.jpeg", "fullname": "Caleb Fahlgren", "name": "cfahlgren1", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 123 } ]
[ { "reaction": "๐Ÿš€", "users": [ "cfahlgren1", "John6666", "sequelbox", "DmitryRyumin", "nroggendorff", "dillfrescott", "nothingRes", "davidberenstein1957", "clem", "erinmikail", "nicoboss", "Joseph717171", "taewan2002", "jsulz", "Best-codes", "aceeee", "ibrahim313", "jgitsolutions", "nazimali" ], "count": 19 }, { "reaction": "๐Ÿค—", "users": [ "Joseph717171", "taewan2002", "jsulz", "Best-codes", "louisbrulenaudet" ], "count": 5 }, { "reaction": "โค๏ธ", "users": [ "asigalov61" ], "count": 1 } ]
2024-09-26T22:39:46.000Z
2024-10-01T18:21:03.926Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/648a374f00f7a3374ee64b99/YPwSOrronoozwHbJchPn3.jpeg", "fullname": "Caleb Fahlgren", "name": "cfahlgren1", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 123, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5f57ea2d3f32f12a3c0692e6/b-9GG2p--smCameUPeCBN.jpeg", "fullname": "Alex", "name": "asigalov61", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 65, "isFollowing": false } ]
/posts/fdaudens/300554611911292
3,339
2
144143912433557
[ { "type": "text", "value": "๐Ÿš€๐Ÿ•บ๐ŸŒŸ New Research Alert - ECCV 2024 (Avatars Collection)! ๐ŸŒŸ๐Ÿ’ƒ๐Ÿš€", "raw": "๐Ÿš€๐Ÿ•บ๐ŸŒŸ New Research Alert - ECCV 2024 (Avatars Collection)! ๐ŸŒŸ๐Ÿ’ƒ๐Ÿš€", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ“„ Title: Expressive Whole-Body 3D Gaussian Avatar ๐Ÿ”", "raw": "๐Ÿ“„ Title: Expressive Whole-Body 3D Gaussian Avatar ๐Ÿ”", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ“ Description: ExAvatar is a model that generates animatable 3D human avatars with facial expressions and hand movements from short monocular videos using a hybrid mesh and 3D Gaussian representation.", "raw": "๐Ÿ“ Description: ExAvatar is a model that generates animatable 3D human avatars with facial expressions and hand movements from short monocular videos using a hybrid mesh and 3D Gaussian representation.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ‘ฅ Authors: Gyeongsik Moon, Takaaki Shiratori, and ", "raw": "๐Ÿ‘ฅ Authors: Gyeongsik Moon, Takaaki Shiratori, and ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@psyth", "href": null, "resource": null, "url": null, "code": null, "user": "psyth", "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ“… Conference: ECCV, 29 Sep โ€“ 4 Oct, 2024 | Milano, Italy ๐Ÿ‡ฎ๐Ÿ‡น", "raw": "๐Ÿ“… Conference: ECCV, 29 Sep โ€“ 4 Oct, 2024 | Milano, Italy ๐Ÿ‡ฎ๐Ÿ‡น", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ“„ Paper: ", "raw": "๐Ÿ“„ Paper: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, 
"raw": "https://huggingface.co/papers/2407.08414", "href": null, "resource": { "type": "paper", "id": "2407.08414", "discussionNum": null }, "url": "https://huggingface.co/papers/2407.08414", "code": null, "user": null, "label": "MeshAvatar: Learning High-quality Triangular Human Avatars from\n Multi-view Videos (2407.08414)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ“„ Paper: ", "raw": "๐Ÿ“„ Paper: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2407.21686", "href": null, "resource": { "type": "paper", "id": "2407.21686", "discussionNum": null }, "url": "https://huggingface.co/papers/2407.21686", "code": null, "user": null, "label": "Expressive Whole-Body 3D Gaussian Avatar (2407.21686)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐ŸŒ Github Page: ", "raw": "๐ŸŒ Github Page: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://mks0601.github.io/ExAvatar", "href": "https://mks0601.github.io/ExAvatar", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ“ Repository: ", "raw": "๐Ÿ“ Repository: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/mks0601/ExAvatar_RELEASE", "href": "https://github.com/mks0601/ExAvatar_RELEASE", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿš€ CVPR-2023-24-Papers: ", "raw": "๐Ÿš€ CVPR-2023-24-Papers: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/DmitryRyumin/CVPR-2023-24-Papers", "href": "https://github.com/DmitryRyumin/CVPR-2023-24-Papers", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿš€ WACV-2024-Papers: ", "raw": 
"๐Ÿš€ WACV-2024-Papers: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/DmitryRyumin/WACV-2024-Papers", "href": "https://github.com/DmitryRyumin/WACV-2024-Papers", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿš€ ICCV-2023-Papers: ", "raw": "๐Ÿš€ ICCV-2023-Papers: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/DmitryRyumin/ICCV-2023-Papers", "href": "https://github.com/DmitryRyumin/ICCV-2023-Papers", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ“š More Papers: more cutting-edge research presented at other conferences in the ", "raw": "๐Ÿ“š More Papers: more cutting-edge research presented at other conferences in the ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/DmitryRyumin/NewEraAI-Papers", "href": null, "resource": { "type": "space", "id": "DmitryRyumin/NewEraAI-Papers", "discussionNum": null }, "url": "https://huggingface.co/spaces/DmitryRyumin/NewEraAI-Papers", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " curated by ", "raw": " curated by ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@DmitryRyumin", "href": null, "resource": null, "url": null, "code": null, "user": "DmitryRyumin", "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿš€ Added to the Avatars Collection: ", "raw": "๐Ÿš€ Added to the Avatars Collection: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/DmitryRyumin/avatars-65df37cdf81fec13d4dbac36", "href": null, "resource": { "type": "collection", "id": "DmitryRyumin/avatars-65df37cdf81fec13d4dbac36", "discussionNum": null }, "url": "https://huggingface.co/collections/DmitryRyumin/avatars-65df37cdf81fec13d4dbac36", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, 
"raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ” Keywords: #ExAvatar #3DAvatar #FacialExpressions #HandMotions #MonocularVideo #3DModeling #GaussianSplatting #MachineLearning #ComputerVision #ComputerGraphics #DeepLearning #AI #ECCV2024", "raw": "๐Ÿ” Keywords: #ExAvatar #3DAvatar #FacialExpressions #HandMotions #MonocularVideo #3DModeling #GaussianSplatting #MachineLearning #ComputerVision #ComputerGraphics #DeepLearning #AI #ECCV2024", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
๐Ÿš€๐Ÿ•บ๐ŸŒŸ New Research Alert - ECCV 2024 (Avatars Collection)! ๐ŸŒŸ๐Ÿ’ƒ๐Ÿš€ ๐Ÿ“„ Title: Expressive Whole-Body 3D Gaussian Avatar ๐Ÿ” ๐Ÿ“ Description: ExAvatar is a model that generates animatable 3D human avatars with facial expressions and hand movements from short monocular videos using a hybrid mesh and 3D Gaussian representation. ๐Ÿ‘ฅ Authors: Gyeongsik Moon, Takaaki Shiratori, and @psyth ๐Ÿ“… Conference: ECCV, 29 Sep โ€“ 4 Oct, 2024 | Milano, Italy ๐Ÿ‡ฎ๐Ÿ‡น ๐Ÿ“„ Paper: https://huggingface.co/papers/2407.08414 ๐Ÿ“„ Paper: https://huggingface.co/papers/2407.21686 ๐ŸŒ Github Page: https://mks0601.github.io/ExAvatar ๐Ÿ“ Repository: https://github.com/mks0601/ExAvatar_RELEASE ๐Ÿš€ CVPR-2023-24-Papers: https://github.com/DmitryRyumin/CVPR-2023-24-Papers ๐Ÿš€ WACV-2024-Papers: https://github.com/DmitryRyumin/WACV-2024-Papers ๐Ÿš€ ICCV-2023-Papers: https://github.com/DmitryRyumin/ICCV-2023-Papers ๐Ÿ“š More Papers: more cutting-edge research presented at other conferences in the https://huggingface.co/spaces/DmitryRyumin/NewEraAI-Papers curated by @DmitryRyumin ๐Ÿš€ Added to the Avatars Collection: https://huggingface.co/collections/DmitryRyumin/avatars-65df37cdf81fec13d4dbac36 ๐Ÿ” Keywords: #ExAvatar #3DAvatar #FacialExpressions #HandMotions #MonocularVideo #3DModeling #GaussianSplatting #MachineLearning #ComputerVision #ComputerGraphics #DeepLearning #AI #ECCV2024
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/nRCxbVng_PPBqKd-Z3KVc.jpeg", "fullname": "Dmitry Ryumin", "name": "DmitryRyumin", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 377, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/1JO_jBZZedVsZJzTG3Zg6.gif" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/lJ9eMw_nP-K_mNde-uy4e.gif" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/YtjofFsSAc0xbyFjHgIYr.gif" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/cqdAHJIockuC6N18lHxiF.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/j8s2R1YAhxKlWQJHI4vxe.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/t4OvsKQdKZsIFXACEa9my.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/nO_9x8ItCEJTeRBFqMXaE.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/2lNXvBFkAnVzlIoR5w_QR.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/ZU5en24HAmT837QXfB5x6.png" } ]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/nRCxbVng_PPBqKd-Z3KVc.jpeg", "fullname": "Dmitry Ryumin", "name": "DmitryRyumin", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 377 }, { "avatarUrl": "/avatars/1681e465d7649f67f94e1b69d236cb1e.svg", "fullname": "Shunsuke Saito", "name": "psyth", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2 } ]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "DmitryRyumin", "John6666", "edgar222", "davidberenstein1957", "xepile", "art-bashkirev" ], "count": 6 }, { "reaction": "๐Ÿค—", "users": [ "DmitryRyumin", "edgar222", "davidberenstein1957", "sunnyient", "art-bashkirev" ], "count": 5 } ]
2024-09-26T18:23:14.000Z
2024-09-26T18:23:14.692Z
[]
/posts/DmitryRyumin/144143912433557
1,964
0
384487520549880
[ { "type": "text", "value": "Good folks at Meta has just unveiled Llama 3.2, pushing the boundaries of language models and computer vision.", "raw": "Good folks at Meta has just unveiled Llama 3.2, pushing the boundaries of language models and computer vision.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Even more interesting is how they trained this cutting-edge model:", "raw": "Even more interesting is how they trained this cutting-edge model:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1๏ธโƒฃ Architecture:", "raw": "1๏ธโƒฃ Architecture:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Llama 3.2 uses an optimized transformer architecture with auto-regressive capabilities. The largest models (11B and 90B) now support multimodal inputs, integrating both text and images.", "raw": "Llama 3.2 uses an optimized transformer architecture with auto-regressive capabilities. 
The largest models (11B and 90B) now support multimodal inputs, integrating both text and images.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "2๏ธโƒฃ Training Pipeline:", "raw": "2๏ธโƒฃ Training Pipeline:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โ€ข Started with pretrained Llama 3.1 text models", "raw": "โ€ข Started with pretrained Llama 3.1 text models", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โ€ข Added image adapters and encoders", "raw": "โ€ข Added image adapters and encoders", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โ€ข Pretrained on large-scale noisy (image, text) pair data", "raw": "โ€ข Pretrained on large-scale noisy (image, text) pair data", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โ€ข Fine-tuned on high-quality in-domain and knowledge-enhanced (image, text) pairs", "raw": "โ€ข Fine-tuned on high-quality in-domain and knowledge-enhanced (image, text) pairs", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "3๏ธโƒฃ Vision Integration:", "raw": "3๏ธโƒฃ Vision Integration:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โ€ข Trained adapter weights to integrate a pre-trained image encoder", "raw": "โ€ข Trained adapter weights to integrate a pre-trained image encoder", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โ€ข Used cross-attention layers to feed image representations 
into the language model", "raw": "โ€ข Used cross-attention layers to feed image representations into the language model", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โ€ข Preserved text-only capabilities by not updating language model parameters during adapter training", "raw": "โ€ข Preserved text-only capabilities by not updating language model parameters during adapter training", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "4๏ธโƒฃ Post-Training Alignment:", "raw": "4๏ธโƒฃ Post-Training Alignment:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โ€ข Multiple rounds of supervised fine-tuning (SFT)", "raw": "โ€ข Multiple rounds of supervised fine-tuning (SFT)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โ€ข Rejection sampling (RS)", "raw": "โ€ข Rejection sampling (RS)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โ€ข Direct preference optimization (DPO)", "raw": "โ€ข Direct preference optimization (DPO)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โ€ข Synthetic data generation using Llama 3.1 for Q&A augmentation", "raw": "โ€ข Synthetic data generation using Llama 3.1 for Q&A augmentation", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โ€ข Reward model ranking for high-quality fine-tuning data", "raw": "โ€ข Reward model ranking for high-quality fine-tuning data", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": 
"text", "value": "5๏ธโƒฃ Lightweight Models:", "raw": "5๏ธโƒฃ Lightweight Models:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โ€ข Used pruning and distillation techniques for 1B and 3B models", "raw": "โ€ข Used pruning and distillation techniques for 1B and 3B models", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โ€ข Structured pruning from Llama 3.1 8B model", "raw": "โ€ข Structured pruning from Llama 3.1 8B model", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โ€ข Knowledge distillation using Llama 3.1 8B and 70B as teachers", "raw": "โ€ข Knowledge distillation using Llama 3.1 8B and 70B as teachers", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "6๏ธโƒฃ Context Length:", "raw": "6๏ธโƒฃ Context Length:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "All models support an impressive 128K token context length.", "raw": "All models support an impressive 128K token context length.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "7๏ธโƒฃ Safety Measures:", "raw": "7๏ธโƒฃ Safety Measures:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Incorporated safety mitigation data to balance helpfulness and safety.", "raw": "Incorporated safety mitigation data to balance helpfulness and safety.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, 
"code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The result? A suite of models ranging from edge-friendly 1B parameters to powerful 90B parameter versions, capable of sophisticated reasoning across text and images. Llama 3.2 is set to revolutionize AI applications from mobile devices to enterprise-scale solutions.", "raw": "The result? A suite of models ranging from edge-friendly 1B parameters to powerful 90B parameter versions, capable of sophisticated reasoning across text and images. Llama 3.2 is set to revolutionize AI applications from mobile devices to enterprise-scale solutions.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "What are your thoughts on these advancements? How do you see Llama 3.2 impacting your industry? Let's discuss in the comments!", "raw": "What are your thoughts on these advancements? How do you see Llama 3.2 impacting your industry? Let's discuss in the comments!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Good folks at Meta have just unveiled Llama 3.2, pushing the boundaries of language models and computer vision. Even more interesting is how they trained this cutting-edge model: 1๏ธโƒฃ Architecture: Llama 3.2 uses an optimized transformer architecture with auto-regressive capabilities. The largest models (11B and 90B) now support multimodal inputs, integrating both text and images. 2๏ธโƒฃ Training Pipeline: โ€ข Started with pretrained Llama 3.1 text models โ€ข Added image adapters and encoders โ€ข Pretrained on large-scale noisy (image, text) pair data โ€ข Fine-tuned on high-quality in-domain and knowledge-enhanced (image, text) pairs 3๏ธโƒฃ Vision Integration: โ€ข Trained adapter weights to integrate a pre-trained image encoder โ€ข Used cross-attention layers to feed image representations into the language model โ€ข Preserved text-only capabilities by not updating language model parameters during adapter training 4๏ธโƒฃ Post-Training Alignment: โ€ข Multiple rounds of supervised fine-tuning (SFT) โ€ข Rejection sampling (RS) โ€ข Direct preference optimization (DPO) โ€ข Synthetic data generation using Llama 3.1 for Q&A augmentation โ€ข Reward model ranking for high-quality fine-tuning data 5๏ธโƒฃ Lightweight Models: โ€ข Used pruning and distillation techniques for 1B and 3B models โ€ข Structured pruning from Llama 3.1 8B model โ€ข Knowledge distillation using Llama 3.1 8B and 70B as teachers 6๏ธโƒฃ Context Length: All models support an impressive 128K token context length. 7๏ธโƒฃ Safety Measures: Incorporated safety mitigation data to balance helpfulness and safety. The result? A suite of models ranging from edge-friendly 1B-parameter to powerful 90B-parameter versions, capable of sophisticated reasoning across text and images. Llama 3.2 is set to revolutionize AI applications from mobile devices to enterprise-scale solutions. What are your thoughts on these advancements? How do you see Llama 3.2 impacting your industry? Let's discuss in the comments!
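To make the vision-integration step above concrete, here is a minimal sketch of the cross-attention adapter idea in plain PyTorch: projected image features attend into the language model's hidden states while the language model itself stays frozen and only the adapter parameters train. The dimensions, names, and gating scheme here are illustrative assumptions, not Meta's actual Llama 3.2 code.

```python
import torch
import torch.nn as nn

class CrossAttentionAdapter(nn.Module):
    """Toy adapter letting text hidden states attend over image features.

    Illustrative only -- sizes and structure are hypothetical, not the
    real Llama 3.2 implementation.
    """
    def __init__(self, text_dim=4096, image_dim=1024, n_heads=8):
        super().__init__()
        self.image_proj = nn.Linear(image_dim, text_dim)  # map vision-encoder output into text space
        self.cross_attn = nn.MultiheadAttention(text_dim, n_heads, batch_first=True)
        self.gate = nn.Parameter(torch.zeros(1))  # starts as a no-op, so text-only behavior is preserved

    def forward(self, text_hidden, image_features):
        img = self.image_proj(image_features)            # (batch, n_patches, text_dim)
        attended, _ = self.cross_attn(text_hidden, img, img)
        return text_hidden + torch.tanh(self.gate) * attended

# During adapter training, the base language model stays frozen:
# for p in language_model.parameters():
#     p.requires_grad = False
```

The zero-initialized gate mirrors the post's point about preserving text-only capabilities: at the start of training the adapter contributes nothing, so the frozen language model behaves exactly as before.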
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/662bf5bfe93bb73804ef9344/WXYLnjjJ4SROkoveIi7If.png", "fullname": "Kuldeep Singh Sidhu", "name": "singhsidhukuldeep", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 219, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/662bf5bfe93bb73804ef9344/mlNuy6PU09plPJxUt6ugg.jpeg" } ]
[]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "fffiloni", "KingNish", "fsommers", "skalaliya", "John6666", "louisbrulenaudet", "adamelliotfields", "emidiosouza", "AtAndDev" ], "count": 9 }, { "reaction": "๐Ÿ‘", "users": [ "noobmldude", "skalaliya", "ShaneTian", "Hecdin", "AtAndDev" ], "count": 5 } ]
2024-09-26T16:31:56.000Z
2024-09-26T16:31:56.482Z
[]
/posts/singhsidhukuldeep/384487520549880
2,489
0
719995159142282
[ { "type": "text", "value": " We did a thing! Eight weeks into our Hugging Face tenure, we can demo a round-trip of Xet-backed files from our local machine to a prod Hugging Face S3 bucket and back. ๐Ÿš€", "raw": " We did a thing! Eight weeks into our Hugging Face tenure, we can demo a round-trip of Xet-backed files from our local machine to a prod Hugging Face S3 bucket and back. ๐Ÿš€", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Itโ€™s been exciting to dive into how the Hub is built and design our steel thread through the infrastructure. Now that the thread is up, we can kick off project Capacious Extremis ๐Ÿช„ to add all the other goodies: authentication, authorization, deduplication, privacy, and more. ", "raw": "Itโ€™s been exciting to dive into how the Hub is built and design our steel thread through the infrastructure. Now that the thread is up, we can kick off project Capacious Extremis ๐Ÿช„ to add all the other goodies: authentication, authorization, deduplication, privacy, and more. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "What does this mean for you? Youโ€™re one step closer to โšก faster downloads, uploads, and iterative development on Hugging Face Hub!โ€จThis is our first step toward replacing Git LFS as the Hub's storage backend: ", "raw": "What does this mean for you? 
Youโ€™re one step closer to โšก faster downloads, uploads, and iterative development on Hugging Face Hub!โ€จThis is our first step toward replacing Git LFS as the Hub's storage backend: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/blog/xethub-joins-hf", "href": "https://huggingface.co/blog/xethub-joins-hf", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Check out the demo on LinkedIn to see the transfer in action: ", "raw": "Check out the demo on LinkedIn to see the transfer in action: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.linkedin.com/posts/annux_youve-heard-of-blue-steel-but-have-activity-7245062126535405568-3cvJ", "href": "https://www.linkedin.com/posts/annux_youve-heard-of-blue-steel-but-have-activity-7245062126535405568-3cvJ", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
We did a thing! Eight weeks into our Hugging Face tenure, we can demo a round-trip of Xet-backed files from our local machine to a prod Hugging Face S3 bucket and back. ๐Ÿš€ Itโ€™s been exciting to dive into how the Hub is built and design our steel thread through the infrastructure. Now that the thread is up, we can kick off project Capacious Extremis ๐Ÿช„ to add all the other goodies: authentication, authorization, deduplication, privacy, and more. What does this mean for you? Youโ€™re one step closer to โšก faster downloads, uploads, and iterative development on Hugging Face Hub! This is our first step toward replacing Git LFS as the Hub's storage backend: https://huggingface.co/blog/xethub-joins-hf Check out the demo on LinkedIn to see the transfer in action: https://www.linkedin.com/posts/annux_youve-heard-of-blue-steel-but-have-activity-7245062126535405568-3cvJ
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/66b05ca6e7c57eac7cafbbc4/nddUkS3xu78cxCS-r7-xB.jpeg", "fullname": "Ann Huang", "name": "erinys", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 27, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "jsulz", "John6666", "davidberenstein1957", "gabrielmbmb", "clem" ], "count": 5 } ]
2024-09-26T16:07:27.000Z
2024-09-26T16:10:48.832Z
[]
/posts/erinys/719995159142282
1,371
0
988164848850810
[ { "type": "text", "value": "My earliest / most popular AI example of using ", "raw": "My earliest / most popular AI example of using ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@pytorch", "href": null, "resource": null, "url": null, "code": null, "user": "pytorch", "label": null, "lang": null }, { "type": "text", "value": " for art has been updated with AI Pair Programming (AIPP) to use resolution of original inputs with ", "raw": " for art has been updated with AI Pair Programming (AIPP) to use resolution of original inputs with ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@gradio", "href": null, "resource": null, "url": null, "code": null, "user": "gradio", "label": null, "lang": null }, { "type": "text", "value": " and ", "raw": " and ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@AnthropicAI", "href": null, "resource": null, "url": null, "code": null, "user": "AnthropicAI", "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "For anyone learning AI model development - this is a great starter!", "raw": "For anyone learning AI model development - this is a great starter!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/awacke1/Image-to-Line-Drawings", "href": null, "resource": { "type": "space", "id": "awacke1/Image-to-Line-Drawings", "discussionNum": null }, "url": "https://huggingface.co/spaces/awacke1/Image-to-Line-Drawings", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
My earliest / most popular AI example of using @pytorch for art has been updated with AI Pair Programming (AIPP) to use the resolution of the original inputs with @gradio and @AnthropicAI. For anyone learning AI model development - this is a great starter! https://huggingface.co/spaces/awacke1/Image-to-Line-Drawings
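For readers using this as a starter, here is a minimal Gradio sketch of the same idea: an image-in, line-drawing-out interface. The edge filter stands in for the Space's actual PyTorch model, and the function name and interface are assumptions made for illustration, not the Space's code.

```python
import gradio as gr
import numpy as np
from PIL import Image, ImageFilter

def to_line_drawing(image: Image.Image) -> Image.Image:
    # Stand-in for the Space's model: a simple edge filter run at the input's native resolution.
    edges = image.convert("L").filter(ImageFilter.FIND_EDGES)
    return Image.fromarray(255 - np.array(edges))  # invert so lines are dark on a white background

demo = gr.Interface(
    fn=to_line_drawing,
    inputs=gr.Image(type="pil"),
    outputs=gr.Image(type="pil"),
)
demo.launch()
```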
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1656147940537-620630b603825909dcbeba35.jpeg", "fullname": "Aaron C Wacker", "name": "awacke1", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 185, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/1L0FQKTmGXtyH2KwvkBjR.png" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-09-26T15:40:51.000Z
2024-09-26T15:40:51.057Z
[]
/posts/awacke1/988164848850810
740
0
772003078630193
[ { "type": "text", "value": "๐Ÿ•ต๐Ÿป ๐€๐ ๐ž๐ง๐ญ๐ข๐œ ๐‘๐€๐† ๐ฐ๐ข๐ญ๐ก ๐Ÿฆ™ ๐‹๐ฅ๐š๐ฆ๐š 3.2", "raw": "๐Ÿ•ต๐Ÿป ๐€๐ ๐ž๐ง๐ญ๐ข๐œ ๐‘๐€๐† ๐ฐ๐ข๐ญ๐ก ๐Ÿฆ™ ๐‹๐ฅ๐š๐ฆ๐š 3.2", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I was excited to explore Llama 3.2, but as a simple ๐Ÿ‡ช๐Ÿ‡บ EU guy, I don't have access to Meta's multimodal models ๐Ÿ˜ฟ", "raw": "I was excited to explore Llama 3.2, but as a simple ๐Ÿ‡ช๐Ÿ‡บ EU guy, I don't have access to Meta's multimodal models ๐Ÿ˜ฟ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿค” So I thought: why not challenge the small 3B text model with Agentic RAG?", "raw": "๐Ÿค” So I thought: why not challenge the small 3B text model with Agentic RAG?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐ŸŽฏ The plan:", "raw": "๐ŸŽฏ The plan:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Build a system that tries to answer questions using a knowledge base.", "raw": "- Build a system that tries to answer questions using a knowledge base.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- If the documents don't contain the answer, use Web search for additional context.", "raw": "- If the documents don't contain the answer, use Web search for additional context.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Check out my experimental 
notebook here: ๐Ÿ““ ", "raw": "Check out my experimental notebook here: ๐Ÿ““ ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://colab.research.google.com/github/deepset-ai/haystack-cookbook/blob/main/notebooks/llama32_agentic_rag.ipynb", "href": "https://colab.research.google.com/github/deepset-ai/haystack-cookbook/blob/main/notebooks/llama32_agentic_rag.ipynb", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "My stack:", "raw": "My stack:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ—๏ธ haystack (", "raw": "๐Ÿ—๏ธ haystack (", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://haystack.deepset.ai/", "href": "https://haystack.deepset.ai/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "): open-source LLM orchestration framework", "raw": "): open-source LLM orchestration framework", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿฆ™ ", "raw": "๐Ÿฆ™ ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct", "href": null, "resource": { "type": "model", "id": "meta-llama/Llama-3.2-3B-Instruct", "discussionNum": null }, "url": "https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿฆ†๐ŸŒ free DuckDuckGo API, integrated with Haystack", "raw": "๐Ÿฆ†๐ŸŒ free DuckDuckGo API, integrated with Haystack", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โœจ ๐˜›๐˜ฉ๐˜ฆ ๐˜ณ๐˜ฆ๐˜ด๐˜ถ๐˜ญ๐˜ต๐˜ด? 
๐˜Œ๐˜ฏ๐˜ค๐˜ฐ๐˜ถ๐˜ณ๐˜ข๐˜จ๐˜ช๐˜ฏ๐˜จ - ๐˜ข ๐˜ง๐˜ฆ๐˜ธ ๐˜ฎ๐˜ฐ๐˜ฏ๐˜ต๐˜ฉ๐˜ด ๐˜ข๐˜จ๐˜ฐ, ๐˜ต๐˜ฉ๐˜ช๐˜ด ๐˜ญ๐˜ฆ๐˜ท๐˜ฆ๐˜ญ ๐˜ฐ๐˜ง ๐˜ฑ๐˜ฆ๐˜ณ๐˜ง๐˜ฐ๐˜ณ๐˜ฎ๐˜ข๐˜ฏ๐˜ค๐˜ฆ ๐˜ง๐˜ณ๐˜ฐ๐˜ฎ ๐˜ข ๐˜ด๐˜ฎ๐˜ข๐˜ญ๐˜ญ ๐˜ฎ๐˜ฐ๐˜ฅ๐˜ฆ๐˜ญ ๐˜ธ๐˜ฐ๐˜ถ๐˜ญ๐˜ฅ'๐˜ท๐˜ฆ ๐˜ฃ๐˜ฆ๐˜ฆ๐˜ฏ ๐˜ถ๐˜ฏ๐˜ต๐˜ฉ๐˜ช๐˜ฏ๐˜ฌ๐˜ข๐˜ฃ๐˜ญ๐˜ฆ!", "raw": "โœจ ๐˜›๐˜ฉ๐˜ฆ ๐˜ณ๐˜ฆ๐˜ด๐˜ถ๐˜ญ๐˜ต๐˜ด? ๐˜Œ๐˜ฏ๐˜ค๐˜ฐ๐˜ถ๐˜ณ๐˜ข๐˜จ๐˜ช๐˜ฏ๐˜จ - ๐˜ข ๐˜ง๐˜ฆ๐˜ธ ๐˜ฎ๐˜ฐ๐˜ฏ๐˜ต๐˜ฉ๐˜ด ๐˜ข๐˜จ๐˜ฐ, ๐˜ต๐˜ฉ๐˜ช๐˜ด ๐˜ญ๐˜ฆ๐˜ท๐˜ฆ๐˜ญ ๐˜ฐ๐˜ง ๐˜ฑ๐˜ฆ๐˜ณ๐˜ง๐˜ฐ๐˜ณ๐˜ฎ๐˜ข๐˜ฏ๐˜ค๐˜ฆ ๐˜ง๐˜ณ๐˜ฐ๐˜ฎ ๐˜ข ๐˜ด๐˜ฎ๐˜ข๐˜ญ๐˜ญ ๐˜ฎ๐˜ฐ๐˜ฅ๐˜ฆ๐˜ญ ๐˜ธ๐˜ฐ๐˜ถ๐˜ญ๐˜ฅ'๐˜ท๐˜ฆ ๐˜ฃ๐˜ฆ๐˜ฆ๐˜ฏ ๐˜ถ๐˜ฏ๐˜ต๐˜ฉ๐˜ช๐˜ฏ๐˜ฌ๐˜ข๐˜ฃ๐˜ญ๐˜ฆ!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This probably reflects the impressive IFEval score of the model (comparable to Llama 3.1 8B).", "raw": "This probably reflects the impressive IFEval score of the model (comparable to Llama 3.1 8B).", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
๐Ÿ•ต๐Ÿป ๐€๐ ๐ž๐ง๐ญ๐ข๐œ ๐‘๐€๐† ๐ฐ๐ข๐ญ๐ก ๐Ÿฆ™ ๐‹๐ฅ๐š๐ฆ๐š 3.2 I was excited to explore Llama 3.2, but as a simple ๐Ÿ‡ช๐Ÿ‡บ EU guy, I don't have access to Meta's multimodal models ๐Ÿ˜ฟ ๐Ÿค” So I thought: why not challenge the small 3B text model with Agentic RAG? ๐ŸŽฏ The plan: - Build a system that tries to answer questions using a knowledge base. - If the documents don't contain the answer, use Web search for additional context. Check out my experimental notebook here: ๐Ÿ““ https://colab.research.google.com/github/deepset-ai/haystack-cookbook/blob/main/notebooks/llama32_agentic_rag.ipynb My stack: ๐Ÿ—๏ธ haystack (https://haystack.deepset.ai/): open-source LLM orchestration framework ๐Ÿฆ™ https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct ๐Ÿฆ†๐ŸŒ free DuckDuckGo API, integrated with Haystack โœจ ๐˜›๐˜ฉ๐˜ฆ ๐˜ณ๐˜ฆ๐˜ด๐˜ถ๐˜ญ๐˜ต๐˜ด? ๐˜Œ๐˜ฏ๐˜ค๐˜ฐ๐˜ถ๐˜ณ๐˜ข๐˜จ๐˜ช๐˜ฏ๐˜จ - ๐˜ข ๐˜ง๐˜ฆ๐˜ธ ๐˜ฎ๐˜ฐ๐˜ฏ๐˜ต๐˜ฉ๐˜ด ๐˜ข๐˜จ๐˜ฐ, ๐˜ต๐˜ฉ๐˜ช๐˜ด ๐˜ญ๐˜ฆ๐˜ท๐˜ฆ๐˜ญ ๐˜ฐ๐˜ง ๐˜ฑ๐˜ฆ๐˜ณ๐˜ง๐˜ฐ๐˜ณ๐˜ฎ๐˜ข๐˜ฏ๐˜ค๐˜ฆ ๐˜ง๐˜ณ๐˜ฐ๐˜ฎ ๐˜ข ๐˜ด๐˜ฎ๐˜ข๐˜ญ๐˜ญ ๐˜ฎ๐˜ฐ๐˜ฅ๐˜ฆ๐˜ญ ๐˜ธ๐˜ฐ๐˜ถ๐˜ญ๐˜ฅ'๐˜ท๐˜ฆ ๐˜ฃ๐˜ฆ๐˜ฆ๐˜ฏ ๐˜ถ๐˜ฏ๐˜ต๐˜ฉ๐˜ช๐˜ฏ๐˜ฌ๐˜ข๐˜ฃ๐˜ญ๐˜ฆ! This probably reflects the impressive IFEval score of the model (comparable to Llama 3.1 8B).
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/626505d493e0b04d75710566/9rfJc9ORXU9J5a42Ev3v6.png", "fullname": "Stefano Fiorucci", "name": "anakin87", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 66, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘", "users": [ "asoria", "emanuelaboros", "John6666", "PKPL", "Marvin73", "ZNDHAESE" ], "count": 6 } ]
2024-09-26T14:05:14.000Z
2024-09-27T07:56:03.257Z
[]
/posts/anakin87/772003078630193
1,704
0
560314856478741
[ { "type": "text", "value": "Transformers v4.45.0 released: includes a lightning-fast method to build tools! โšก๏ธ", "raw": "Transformers v4.45.0 released: includes a lightning-fast method to build tools! โšก๏ธ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "During user research with colleagues ", "raw": "During user research with colleagues ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@MoritzLaurer", "href": null, "resource": null, "url": null, "code": null, "user": "MoritzLaurer", "label": null, "lang": null }, { "type": "text", "value": " and ", "raw": " and ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@Jofthomas", "href": null, "resource": null, "url": null, "code": null, "user": "Jofthomas", "label": null, "lang": null }, { "type": "text", "value": " , we discovered that the class definition currently in used to define a Tool in ", "raw": " , we discovered that the class definition currently in used to define a Tool in ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "inline_code", "value": null, "raw": "`transformers.agents`", "href": null, "resource": null, "url": null, "code": "transformers.agents", "user": null, "label": null, "lang": null }, { "type": "text", "value": " is a bit tedious to use, because it goes in great detail.", "raw": " is a bit tedious to use, because it goes in great detail.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โžก๏ธ So Iโ€™ve made an easier way to build tools: just make a function with type hints + a docstring, and add a ", "raw": "โžก๏ธ So Iโ€™ve made an easier way to build tools: just make a function with type hints + a docstring, and add a ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@tool", "href": null, "resource": null, "url": null, "code": null, "user": "tool", "label": null, "lang": null }, { "type": "text", "value": " decorator in front.", "raw": " decorator in front.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, 
"user": null, "label": null, "lang": null }, { "type": "text", "value": "โœ…ย Voilร , youโ€™re good to go!", "raw": "โœ…ย Voilร , youโ€™re good to go!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Read all about it in the new doc here: ", "raw": "Read all about it in the new doc here: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/docs/transformers/main/en/agents#create-a-new-tool", "href": "https://huggingface.co/docs/transformers/main/en/agents#create-a-new-tool", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "And donโ€™t hesitate to give feedback, Iโ€™m all ears! ๐Ÿค—", "raw": "And donโ€™t hesitate to give feedback, Iโ€™m all ears! ๐Ÿค—", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Transformers v4.45.0 released: includes a lightning-fast method to build tools! โšก๏ธ During user research with colleagues @MoritzLaurer and @Jofthomas, we discovered that the class definition currently in use to define a Tool in `transformers.agents` is a bit tedious to use, because it goes into great detail. โžก๏ธ So Iโ€™ve made an easier way to build tools: just make a function with type hints + a docstring, and add a @tool decorator in front. โœ… Voilร , youโ€™re good to go! Read all about it in the new doc here: https://huggingface.co/docs/transformers/main/en/agents#create-a-new-tool And donโ€™t hesitate to give feedback, Iโ€™m all ears! ๐Ÿค—
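A best-effort sketch of the pattern described above: a plain function with type hints and an Args-style docstring, turned into an agent tool by the decorator. The exact import path and docstring conventions are taken from the linked transformers agents docs as I understand them, so treat the specifics as assumptions and check the docs for the authoritative version.

```python
from transformers import tool  # import path per the transformers agents docs (assumption)

@tool
def word_count(text: str) -> str:
    """Counts the number of words in a piece of text.

    Args:
        text: The text whose words should be counted.
    """
    return f"{len(text.split())} words"
```

The decorator reads the type hints and docstring to build the tool's schema, which is exactly why the post stresses writing both.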
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63d10d4e8eaa4831005e92b5/7p7-OmWM6PqqCs7ZStPGD.jpeg", "fullname": "Aymeric Roucher", "name": "m-ric", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 494, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/63d10d4e8eaa4831005e92b5/eGY-NKKcu77WNCQ7haIje.png" } ]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64257c616d0f0f5f1dc6aa2a/WNXC2PcyDn-jt9ZY5Rbka.jpeg", "fullname": "Joffrey THOMAS", "name": "Jofthomas", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 83 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1613511937628-5fb15d1e84389b139cf3b508.jpeg", "fullname": "Moritz Laurer", "name": "MoritzLaurer", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 236 }, { "avatarUrl": "/avatars/3c4b303dcc49850fa50e782f8f2624b5.svg", "fullname": "Parabola", "name": "Tool", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null } ]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "Jofthomas", "John6666", "alielfilali01", "AtAndDev", "davidrd123", "MoritzLaurer" ], "count": 6 }, { "reaction": "โค๏ธ", "users": [ "louisbrulenaudet" ], "count": 1 } ]
2024-09-26T10:17:17.000Z
2024-09-26T10:17:17.210Z
[]
/posts/m-ric/560314856478741
1,492
0
371969196901274
[ { "type": "text", "value": "Want to get familiar with llama 3.2 and an actual example of structured data generation? Try out the Google Colab I created.", "raw": "Want to get familiar with llama 3.2 and an actual example of structured data generation? Try out the Google Colab I created.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Notebook: ", "raw": "Notebook: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://colab.research.google.com/drive/1nHNXUbgwRMyjFeBZbQvNLqaXGkqE4Wcs?usp=sharing", "href": "https://colab.research.google.com/drive/1nHNXUbgwRMyjFeBZbQvNLqaXGkqE4Wcs?usp=sharing", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Model: ", "raw": "Model: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/hugging-quants/Llama-3.2-3B-Instruct-Q8_0-GGUF/tree/main", "href": null, "resource": { "type": "model", "id": "hugging-quants/Llama-3.2-3B-Instruct-Q8_0-GGUF", "discussionNum": null }, "url": "https://huggingface.co/hugging-quants/Llama-3.2-3B-Instruct-Q8_0-GGUF/tree/main", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Dataset: ", "raw": "Dataset: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/argilla/FinePersonas-v0.1", "href": null, "resource": { "type": "dataset", "id": "argilla/FinePersonas-v0.1", "discussionNum": null }, "url": "https://huggingface.co/datasets/argilla/FinePersonas-v0.1", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Library: ", "raw": "Library: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/argilla-io/distilabel", "href": "https://github.com/argilla-io/distilabel", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Want to get familiar with llama 3.2 and an actual example of structured data generation? Try out the Google Colab I created. Notebook: https://colab.research.google.com/drive/1nHNXUbgwRMyjFeBZbQvNLqaXGkqE4Wcs?usp=sharing Model: https://huggingface.co/hugging-quants/Llama-3.2-3B-Instruct-Q8_0-GGUF/tree/main Dataset: https://huggingface.co/datasets/argilla/FinePersonas-v0.1 Library: https://github.com/argilla-io/distilabel
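For readers who want a feel for structured data generation before opening the notebook, here is a small, hedged sketch: prompt the 3B Instruct model for JSON and validate it with pydantic, retrying on parse failures. This is not the distilabel pipeline from the notebook, and it loads the base Instruct model through `transformers` rather than the linked GGUF quant; the schema and prompt are invented for illustration.

```python
import json
from pydantic import BaseModel, ValidationError
from transformers import pipeline

class Persona(BaseModel):
    name: str
    occupation: str
    hobbies: list[str]

# Loads the base Instruct model (not the linked GGUF quant) for simplicity.
generator = pipeline("text-generation", model="meta-llama/Llama-3.2-3B-Instruct")

prompt = (
    "Return ONLY a JSON object with keys name, occupation, and hobbies "
    "(a list of strings) describing a fictional persona."
)

persona = None
for _ in range(3):  # a few retries in case the model emits invalid JSON
    out = generator(prompt, max_new_tokens=200, return_full_text=False)[0]["generated_text"]
    try:
        persona = Persona(**json.loads(out.strip()))
        break
    except (json.JSONDecodeError, ValidationError):
        continue

print(persona)
```

Libraries like distilabel wrap this generate-validate loop (plus batching, personas, and dataset output) into reusable pipeline steps, which is what the notebook demonstrates.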
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1677141720071-634ff41ff32062e9eb7b06a3.jpeg", "fullname": "David Berenstein", "name": "davidberenstein1957", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 167, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘", "users": [ "Kasnol", "John6666", "adamelliotfields", "LeroyDyer" ], "count": 4 }, { "reaction": "๐Ÿ”ฅ", "users": [ "asoria", "Kaligraphy247" ], "count": 2 }, { "reaction": "๐Ÿ˜Ž", "users": [ "NickyNicky" ], "count": 1 } ]
2024-09-26T08:00:31.000Z
2024-09-26T08:00:31.324Z
[]
/posts/davidberenstein1957/371969196901274
1,436
0
363699335352855
[ { "type": "text", "value": "meta just released 1b parameters model and to honor it i released arco 2 just in time for the fine-tuners to tweak around, enjoy these small powerful language models!!!", "raw": "meta just released 1b parameters model and to honor it i released arco 2 just in time for the fine-tuners to tweak around, enjoy these small powerful language models!!!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/meta-llama/Llama-3.2-1B", "href": null, "resource": { "type": "model", "id": "meta-llama/Llama-3.2-1B", "discussionNum": null }, "url": "https://huggingface.co/meta-llama/Llama-3.2-1B", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/appvoid/arco-2", "href": null, "resource": { "type": "model", "id": "appvoid/arco-2", "discussionNum": null }, "url": "https://huggingface.co/appvoid/arco-2", "code": null, "user": null, "label": null, "lang": null } ]
meta just released 1b parameters model and to honor it i released arco 2 just in time for the fine-tuners to tweak around, enjoy these small powerful language models!!! https://huggingface.co/meta-llama/Llama-3.2-1B https://huggingface.co/appvoid/arco-2
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a813dedbb9e28866a91b27/zs-RWFuXs17IfPUhxQaei.jpeg", "fullname": "appvoid", "name": "appvoid", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 35, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/62a813dedbb9e28866a91b27/KPTzttQs3icfmTdspn1sx.png" } ]
[]
[ { "reaction": "๐Ÿ‘", "users": [ "iammaggie", "John6666", "bfuzzy1", "darkzbaron", "Izac" ], "count": 5 }, { "reaction": "๐Ÿ”ฅ", "users": [ "aiisthebest" ], "count": 1 } ]
2024-09-25T21:17:25.000Z
2024-09-26T04:55:58.295Z
[ { "avatarUrl": "/avatars/89995841d20aecc23e95a9cf088e33c0.svg", "fullname": "CHRIS MCGUIRE", "name": "iammaggie", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false } ]
/posts/appvoid/363699335352855
1,823
1
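A quick way to sanity-check either of the small checkpoints linked above before fine-tuning is a plain text-generation pipeline. This is a generic sketch rather than anything from the post: the prompt is arbitrary, the model id can be swapped for the other checkpoint, and the meta-llama repo is gated, so you need to accept its license and log in with a Hub token first.

```python
# Smoke test for a ~1B parameter base model (sketch; adjust the model id as needed).
from transformers import pipeline

# Gated repo: accept the license on the Hub and run `huggingface-cli login` first.
generator = pipeline("text-generation", model="meta-llama/Llama-3.2-1B", torch_dtype="auto")

out = generator(
    "Small language models are useful because",
    max_new_tokens=40,
    do_sample=False,
)
print(out[0]["generated_text"])
```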
973599697871972
[ { "type": "text", "value": "A big day for multimodal models! ", "raw": "A big day for multimodal models! ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Llama 3.2 is out with a major update: it can now process images.", "raw": "Llama 3.2 is out with a major update: it can now process images.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Key highlights:", "raw": "Key highlights:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โ€ข 11B and 90B vision models", "raw": "โ€ข 11B and 90B vision models", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โ€ข Small 1B and 3B text models for mobile devices", "raw": "โ€ข Small 1B and 3B text models for mobile devices", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Eval results already on the leaderboard: ", "raw": "Eval results already on the leaderboard: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard", "href": null, "resource": { "type": "space", "id": "open-llm-leaderboard/open_llm_leaderboard", "discussionNum": null }, "url": "https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Collection: ", "raw": "Collection: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/meta-llama/llama-32-66f448ffc8c32f949b04c8cf", "href": null, "resource": { "type": "collection", "id": "meta-llama/llama-32-66f448ffc8c32f949b04c8cf", "discussionNum": null }, "url": "https://huggingface.co/collections/meta-llama/llama-32-66f448ffc8c32f949b04c8cf", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", 
"value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
A big day for multimodal models! Llama 3.2 is out with a major update: it can now process images. Key highlights: โ€ข 11B and 90B vision models โ€ข Small 1B and 3B text models for mobile devices Eval results already on the leaderboard: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard Collection: https://huggingface.co/collections/meta-llama/llama-32-66f448ffc8c32f949b04c8cf
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/647f36a8454af0237bd49574/jshkqBUTY-GZL8As8y6Aq.jpeg", "fullname": "Florent Daudens", "name": "fdaudens", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 384, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/647f36a8454af0237bd49574/RUeWvjsnlfbeCnMNjG2d8.jpeg" } ]
[]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "sachithgunasekara", "YaTharThShaRma999", "Joseph717171", "John6666", "DmitryRyumin", "ajibawa-2023", "bfuzzy1", "Stanlv", "aiisthebest", "louisbrulenaudet", "Salvor", "davanstrien", "Mackin7" ], "count": 13 }, { "reaction": "๐Ÿš€", "users": [ "Joseph717171", "den0620", "bfuzzy1", "Stanlv", "davanstrien" ], "count": 5 }, { "reaction": "๐Ÿค—", "users": [ "Joseph717171", "DmitryRyumin" ], "count": 2 } ]
2024-09-25T18:40:52.000Z
2024-09-26T11:55:49.018Z
[ { "avatarUrl": "/avatars/16f5824b31a1de65ea12272ad990d932.svg", "fullname": "Rafael", "name": "aiisthebest", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1, "isFollowing": false } ]
/posts/fdaudens/973599697871972
3,289
1
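For readers who want to try the new image capability straight away, here is a minimal inference sketch with transformers; it assumes a release with Mllama support (roughly 4.45 or later), access to the gated meta-llama/Llama-3.2-11B-Vision-Instruct repo, and an arbitrary placeholder image URL.

```python
# Sketch: image + text chat with a Llama 3.2 vision checkpoint.
# Assumes transformers with Mllama support (~4.45+) and access to the gated repo.
import requests
import torch
from PIL import Image
from transformers import AutoProcessor, MllamaForConditionalGeneration

model_id = "meta-llama/Llama-3.2-11B-Vision-Instruct"
model = MllamaForConditionalGeneration.from_pretrained(
    model_id, torch_dtype=torch.bfloat16, device_map="auto"
)
processor = AutoProcessor.from_pretrained(model_id)

# Any RGB image works; this URL is only a placeholder example.
url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg"
image = Image.open(requests.get(url, stream=True).raw)

messages = [
    {"role": "user", "content": [
        {"type": "image"},
        {"type": "text", "text": "Describe this image in one sentence."},
    ]}
]
prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
inputs = processor(image, prompt, add_special_tokens=False, return_tensors="pt").to(model.device)

output = model.generate(**inputs, max_new_tokens=60)
print(processor.decode(output[0], skip_special_tokens=True))
```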
563411677031749
[ { "type": "text", "value": "Exciting news in AI: Molmo, a groundbreaking family of open-source multimodal models, has just been announced! ๐Ÿš€", "raw": "Exciting news in AI: Molmo, a groundbreaking family of open-source multimodal models, has just been announced! ๐Ÿš€", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Key points:", "raw": "Key points:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Closes the gap with proprietary systems on benchmarks & human evals", "raw": "- Closes the gap with proprietary systems on benchmarks & human evals", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Trained on high-quality data (< 1M image-text pairs vs billions)", "raw": "- Trained on high-quality data (< 1M image-text pairs vs billions)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Introduces pointing capability for rich interactions", "raw": "- Introduces pointing capability for rich interactions", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Fully open weights, data, and training code", "raw": "- Fully open weights, data, and training code", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The 72B model outperforms several proprietary systems, while the 1B model nearly matches GPT-4V. Small is indeed the new big in AI!", "raw": "The 72B model outperforms several proprietary systems, while the 1B model nearly matches GPT-4V. 
Small is indeed the new big in AI!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "There's an interactive demo available using Molmo-7B-D. Definitely worth checking out to see its capabilities firsthand.", "raw": "There's an interactive demo available using Molmo-7B-D. Definitely worth checking out to see its capabilities firsthand.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "All model weights, data, and code will be released soon. This is a significant step towards truly open, cutting-edge multimodal AI.", "raw": "All model weights, data, and code will be released soon. This is a significant step towards truly open, cutting-edge multimodal AI.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The future of AI research and applications is looking brighter than ever! ๐Ÿค–๐Ÿ–ผ๏ธ", "raw": "The future of AI research and applications is looking brighter than ever! 
๐Ÿค–๐Ÿ–ผ๏ธ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ‘‰ Demo: ", "raw": "๐Ÿ‘‰ Demo: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://molmo.allenai.org/", "href": "https://molmo.allenai.org/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ‘‰ Models: ", "raw": "๐Ÿ‘‰ Models: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/allenai/molmo-66f379e6fe3b8ef090a8ca19", "href": null, "resource": { "type": "collection", "id": "allenai/molmo-66f379e6fe3b8ef090a8ca19", "discussionNum": null }, "url": "https://huggingface.co/collections/allenai/molmo-66f379e6fe3b8ef090a8ca19", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "#AI #MachineLearning #OpenSource #ComputerVision", "raw": "#AI #MachineLearning #OpenSource #ComputerVision", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Exciting news in AI: Molmo, a groundbreaking family of open-source multimodal models, has just been announced! ๐Ÿš€ Key points: - Closes the gap with proprietary systems on benchmarks & human evals - Trained on high-quality data (< 1M image-text pairs vs billions) - Introduces pointing capability for rich interactions - Fully open weights, data, and training code The 72B model outperforms several proprietary systems, while the 1B model nearly matches GPT-4V. Small is indeed the new big in AI! There's an interactive demo available using Molmo-7B-D. Definitely worth checking out to see its capabilities firsthand. All model weights, data, and code will be released soon. This is a significant step towards truly open, cutting-edge multimodal AI. The future of AI research and applications is looking brighter than ever! ๐Ÿค–๐Ÿ–ผ๏ธ ๐Ÿ‘‰ Demo: https://molmo.allenai.org/ ๐Ÿ‘‰ Models: https://huggingface.co/collections/allenai/molmo-66f379e6fe3b8ef090a8ca19 #AI #MachineLearning #OpenSource #ComputerVision
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/647f36a8454af0237bd49574/jshkqBUTY-GZL8As8y6Aq.jpeg", "fullname": "Florent Daudens", "name": "fdaudens", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 384, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/647f36a8454af0237bd49574/4kVD0qU-M2jh--bSn5r3P.png" } ]
[]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "YaTharThShaRma999", "John6666", "onekq", "bfuzzy1", "Yeecy", "victor", "davanstrien" ], "count": 7 } ]
2024-09-25T16:59:04.000Z
2024-09-25T16:59:04.511Z
[]
/posts/fdaudens/563411677031749
1,788
0
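To try a released checkpoint locally instead of through the demo, the sketch below mirrors the pattern shown on the Molmo model cards: the helper methods processor.process and model.generate_from_batch come from the repository's remote code rather than the core transformers API, so treat the exact names and arguments as assumptions to verify against the card, and the image path is a placeholder.

```python
# Sketch of local Molmo inference via trust_remote_code (verify details against the model card;
# processor.process and model.generate_from_batch are defined by the repo's remote code).
from PIL import Image
from transformers import AutoModelForCausalLM, AutoProcessor, GenerationConfig

repo = "allenai/Molmo-7B-D-0924"
processor = AutoProcessor.from_pretrained(repo, trust_remote_code=True, torch_dtype="auto", device_map="auto")
model = AutoModelForCausalLM.from_pretrained(repo, trust_remote_code=True, torch_dtype="auto", device_map="auto")

inputs = processor.process(
    images=[Image.open("example.jpg")],  # placeholder image path
    text="Describe this image.",
)
inputs = {k: v.to(model.device).unsqueeze(0) for k, v in inputs.items()}

output = model.generate_from_batch(
    inputs,
    GenerationConfig(max_new_tokens=200, stop_strings="<|endoftext|>"),
    tokenizer=processor.tokenizer,
)
generated = output[0, inputs["input_ids"].size(1):]
print(processor.tokenizer.decode(generated, skip_special_tokens=True))
```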
644671729298990
[ { "type": "text", "value": "๐Ÿ”ฅ๐ŸŽญ๐ŸŒŸ New Research Alert - ECCV 2024 (Avatars Collection)! ๐ŸŒŸ๐ŸŽญ๐Ÿ”ฅ", "raw": "๐Ÿ”ฅ๐ŸŽญ๐ŸŒŸ New Research Alert - ECCV 2024 (Avatars Collection)! ๐ŸŒŸ๐ŸŽญ๐Ÿ”ฅ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ“„ Title: MeshAvatar: Learning High-quality Triangular Human Avatars from Multi-view Videos ๐Ÿ”", "raw": "๐Ÿ“„ Title: MeshAvatar: Learning High-quality Triangular Human Avatars from Multi-view Videos ๐Ÿ”", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ“ Description: MeshAvatar is a novel pipeline that generates high-quality triangular human avatars from multi-view videos, enabling realistic editing and rendering through a mesh-based approach with physics-based decomposition.", "raw": "๐Ÿ“ Description: MeshAvatar is a novel pipeline that generates high-quality triangular human avatars from multi-view videos, enabling realistic editing and rendering through a mesh-based approach with physics-based decomposition.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ‘ฅ Authors: Yushuo Chen, Zerong Zheng, Zhe Li, Chao Xu, and Yebin Liu", "raw": "๐Ÿ‘ฅ Authors: Yushuo Chen, Zerong Zheng, Zhe Li, Chao Xu, and Yebin Liu", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ“… Conference: ECCV, 29 Sep โ€“ 4 Oct, 2024 | Milano, Italy ๐Ÿ‡ฎ๐Ÿ‡น", "raw": "๐Ÿ“… Conference: ECCV, 29 Sep โ€“ 4 Oct, 2024 | Milano, Italy ๐Ÿ‡ฎ๐Ÿ‡น", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ“„ Paper: ", "raw": "๐Ÿ“„ Paper: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2407.08414", "href": null, "resource": { "type": "paper", "id": "2407.08414", "discussionNum": 
null }, "url": "https://huggingface.co/papers/2407.08414", "code": null, "user": null, "label": "MeshAvatar: Learning High-quality Triangular Human Avatars from\n Multi-view Videos (2407.08414)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐ŸŒ Github Page: ", "raw": "๐ŸŒ Github Page: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://shad0wta9.github.io/meshavatar-page", "href": "https://shad0wta9.github.io/meshavatar-page", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ“ Repository: ", "raw": "๐Ÿ“ Repository: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/shad0wta9/meshavatar", "href": "https://github.com/shad0wta9/meshavatar", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ“บ Video: ", "raw": "๐Ÿ“บ Video: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.youtube.com/watch?v=Kpbpujkh2iI", "href": "https://www.youtube.com/watch?v=Kpbpujkh2iI", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿš€ CVPR-2023-24-Papers: ", "raw": "๐Ÿš€ CVPR-2023-24-Papers: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/DmitryRyumin/CVPR-2023-24-Papers", "href": "https://github.com/DmitryRyumin/CVPR-2023-24-Papers", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿš€ WACV-2024-Papers: ", "raw": "๐Ÿš€ WACV-2024-Papers: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/DmitryRyumin/WACV-2024-Papers", 
"href": "https://github.com/DmitryRyumin/WACV-2024-Papers", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿš€ ICCV-2023-Papers: ", "raw": "๐Ÿš€ ICCV-2023-Papers: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/DmitryRyumin/ICCV-2023-Papers", "href": "https://github.com/DmitryRyumin/ICCV-2023-Papers", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ“š More Papers: more cutting-edge research presented at other conferences in the ", "raw": "๐Ÿ“š More Papers: more cutting-edge research presented at other conferences in the ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/DmitryRyumin/NewEraAI-Papers", "href": null, "resource": { "type": "space", "id": "DmitryRyumin/NewEraAI-Papers", "discussionNum": null }, "url": "https://huggingface.co/spaces/DmitryRyumin/NewEraAI-Papers", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " curated by ", "raw": " curated by ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@DmitryRyumin", "href": null, "resource": null, "url": null, "code": null, "user": "DmitryRyumin", "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿš€ Added to the Avatars Collection: ", "raw": "๐Ÿš€ Added to the Avatars Collection: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/DmitryRyumin/avatars-65df37cdf81fec13d4dbac36", "href": null, "resource": { "type": "collection", "id": "DmitryRyumin/avatars-65df37cdf81fec13d4dbac36", "discussionNum": null }, "url": "https://huggingface.co/collections/DmitryRyumin/avatars-65df37cdf81fec13d4dbac36", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ” Keywords: #MeshAvatar #3DAvatars #MultiViewVideo #PhysicsBasedRendering 
#TriangularMesh #AvatarCreation #3DModeling #NeuralRendering #Relighting #AvatarEditing #MachineLearning #ComputerVision #ComputerGraphics #DeepLearning #AI #ECCV2024", "raw": "๐Ÿ” Keywords: #MeshAvatar #3DAvatars #MultiViewVideo #PhysicsBasedRendering #TriangularMesh #AvatarCreation #3DModeling #NeuralRendering #Relighting #AvatarEditing #MachineLearning #ComputerVision #ComputerGraphics #DeepLearning #AI #ECCV2024", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
๐Ÿ”ฅ๐ŸŽญ๐ŸŒŸ New Research Alert - ECCV 2024 (Avatars Collection)! ๐ŸŒŸ๐ŸŽญ๐Ÿ”ฅ ๐Ÿ“„ Title: MeshAvatar: Learning High-quality Triangular Human Avatars from Multi-view Videos ๐Ÿ” ๐Ÿ“ Description: MeshAvatar is a novel pipeline that generates high-quality triangular human avatars from multi-view videos, enabling realistic editing and rendering through a mesh-based approach with physics-based decomposition. ๐Ÿ‘ฅ Authors: Yushuo Chen, Zerong Zheng, Zhe Li, Chao Xu, and Yebin Liu ๐Ÿ“… Conference: ECCV, 29 Sep โ€“ 4 Oct, 2024 | Milano, Italy ๐Ÿ‡ฎ๐Ÿ‡น ๐Ÿ“„ Paper: https://huggingface.co/papers/2407.08414 ๐ŸŒ Github Page: https://shad0wta9.github.io/meshavatar-page ๐Ÿ“ Repository: https://github.com/shad0wta9/meshavatar ๐Ÿ“บ Video: https://www.youtube.com/watch?v=Kpbpujkh2iI ๐Ÿš€ CVPR-2023-24-Papers: https://github.com/DmitryRyumin/CVPR-2023-24-Papers ๐Ÿš€ WACV-2024-Papers: https://github.com/DmitryRyumin/WACV-2024-Papers ๐Ÿš€ ICCV-2023-Papers: https://github.com/DmitryRyumin/ICCV-2023-Papers ๐Ÿ“š More Papers: more cutting-edge research presented at other conferences in the https://huggingface.co/spaces/DmitryRyumin/NewEraAI-Papers curated by @DmitryRyumin ๐Ÿš€ Added to the Avatars Collection: https://huggingface.co/collections/DmitryRyumin/avatars-65df37cdf81fec13d4dbac36 ๐Ÿ” Keywords: #MeshAvatar #3DAvatars #MultiViewVideo #PhysicsBasedRendering #TriangularMesh #AvatarCreation #3DModeling #NeuralRendering #Relighting #AvatarEditing #MachineLearning #ComputerVision #ComputerGraphics #DeepLearning #AI #ECCV2024
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/nRCxbVng_PPBqKd-Z3KVc.jpeg", "fullname": "Dmitry Ryumin", "name": "DmitryRyumin", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 377, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/Ty66vEStkrPYILiHefQGh.gif" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/fKIAz-bai1jnOBkYgNrr2.gif" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/_YzupuI7e3wL7IBFry_hU.gif" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/-iK9MrXj0zmGdjT11vVO2.gif" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/rwaWl8gfcpWkBAMJAqBVE.gif" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/pNGH7iKTEyfZZ-9qSd7mw.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/P3ggHRPsWE-6lymqsLH8G.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/PprSw4IQ_hgJCWthlvxpQ.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/I_q4JXxAj4A7nCAmBGiEo.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/F0qeP7jgkJy84xYqG3Kub.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/fHU7g4Z6wfZq-Oi7eChot.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/xFHE3dvDsg5w3Qpsw9ovK.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/hV-9G4ZVcCwHKeRc_5HDb.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/NPib-EUdYCB4kUwv50-j0.png" } ]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/nRCxbVng_PPBqKd-Z3KVc.jpeg", "fullname": "Dmitry Ryumin", "name": "DmitryRyumin", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 377 } ]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "DmitryRyumin", "NeelRai", "vutrungduc7593", "bfuzzy1", "victor" ], "count": 5 }, { "reaction": "๐Ÿš€", "users": [ "NeelRai", "John6666", "bfuzzy1", "victor" ], "count": 4 }, { "reaction": "๐Ÿค—", "users": [ "DmitryRyumin", "Rsln" ], "count": 2 }, { "reaction": "๐Ÿ˜Ž", "users": [ "Winnougan" ], "count": 1 }, { "reaction": "โค๏ธ", "users": [ "Salvor" ], "count": 1 } ]
2024-09-25T16:39:08.000Z
2024-09-25T16:39:08.870Z
[]
/posts/DmitryRyumin/644671729298990
1,853
0
592547965144202
[ { "type": "text", "value": "๐Ÿ“ข Seriously, We can't go with Big5 or other non structured descriptions to diverse large amount of characters ๐Ÿ‘จโ€๐Ÿ‘ฉโ€๐Ÿ‘ฆโ€๐Ÿ‘ฆ from many books ๐Ÿ“š. Instead, The factorization + open-psychometrics antonyms extracted from dialogues is a key ๐Ÿ”‘ for automatic character profiling that purely relies on book content ๐Ÿ“–. With that, happy to share delighted to share with you ๐Ÿ™Œ more on this topic in YouTube video:", "raw": "๐Ÿ“ข Seriously, We can't go with Big5 or other non structured descriptions to diverse large amount of characters ๐Ÿ‘จโ€๐Ÿ‘ฉโ€๐Ÿ‘ฆโ€๐Ÿ‘ฆ from many books ๐Ÿ“š. Instead, The factorization + open-psychometrics antonyms extracted from dialogues is a key ๐Ÿ”‘ for automatic character profiling that purely relies on book content ๐Ÿ“–. With that, happy to share delighted to share with you ๐Ÿ™Œ more on this topic in YouTube video:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://youtu.be/UQQsXfZyjjc", "href": "https://youtu.be/UQQsXfZyjjc", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ”‘ From which you will find out:", "raw": "๐Ÿ”‘ From which you will find out:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โœ… How to perform book processing ๐Ÿ“– aimed at personalities extraction", "raw": "โœ… How to perform book processing ๐Ÿ“– aimed at personalities extraction", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โœ… How to impute personalities ๐Ÿ‘จโ€๐Ÿ‘ฉโ€๐Ÿ‘ฆโ€๐Ÿ‘ฆ and character network for deep learning ๐Ÿค–", "raw": "โœ… How to impute personalities ๐Ÿ‘จโ€๐Ÿ‘ฉโ€๐Ÿ‘ฆโ€๐Ÿ‘ฆ and character network for deep learning ๐Ÿค–", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โœ… How to evaluate ๐Ÿ“Š advances / experiment findings ๐Ÿงช", "raw": "โœ… How to evaluate ๐Ÿ“Š advances / experiment findings ๐Ÿงช", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null 
}, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Additional materials:", "raw": "Additional materials:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐ŸŒŸ Github: ", "raw": "๐ŸŒŸ Github: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/nicolay-r/book-persona-retriever", "href": "https://github.com/nicolay-r/book-persona-retriever", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ“œ Paper: ", "raw": "๐Ÿ“œ Paper: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.dropbox.com/scl/fi/0c2axh97hadolwphgu7it/rusnachenko2024personality.pdf?rlkey=g2yyzv01th2rjt4o1oky0q8zc&st=omssztha&dl=1", "href": "https://www.dropbox.com/scl/fi/0c2axh97hadolwphgu7it/rusnachenko2024personality.pdf?rlkey=g2yyzv01th2rjt4o1oky0q8zc&st=omssztha&dl=1", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ“™ Google-colab experiments: ", "raw": "๐Ÿ“™ Google-colab experiments: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://colab.research.google.com/github/nicolay-r/deep-book-processing/blob/master/parlai_gutenberg_experiments.ipynb", "href": "https://colab.research.google.com/github/nicolay-r/deep-book-processing/blob/master/parlai_gutenberg_experiments.ipynb", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿฆœ Task: ", "raw": "๐Ÿฆœ Task: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/nicolay-r/parlai_bookchar_task/tree/master", "href": "https://github.com/nicolay-r/parlai_bookchar_task/tree/master", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
๐Ÿ“ข Seriously, we can't go with Big5 or other non-structured descriptions for the diverse, large number of characters ๐Ÿ‘จโ€๐Ÿ‘ฉโ€๐Ÿ‘ฆโ€๐Ÿ‘ฆ from many books ๐Ÿ“š. Instead, the factorization + open-psychometrics antonyms extracted from dialogues is a key ๐Ÿ”‘ for automatic character profiling that relies purely on book content ๐Ÿ“–. With that, I'm delighted to share with you ๐Ÿ™Œ more on this topic in a YouTube video: https://youtu.be/UQQsXfZyjjc ๐Ÿ”‘ From which you will find out: โœ… How to perform book processing ๐Ÿ“– aimed at personalities extraction โœ… How to impute personalities ๐Ÿ‘จโ€๐Ÿ‘ฉโ€๐Ÿ‘ฆโ€๐Ÿ‘ฆ and character network for deep learning ๐Ÿค– โœ… How to evaluate ๐Ÿ“Š advances / experiment findings ๐Ÿงช Additional materials: ๐ŸŒŸ Github: https://github.com/nicolay-r/book-persona-retriever ๐Ÿ“œ Paper: https://www.dropbox.com/scl/fi/0c2axh97hadolwphgu7it/rusnachenko2024personality.pdf?rlkey=g2yyzv01th2rjt4o1oky0q8zc&st=omssztha&dl=1 ๐Ÿ“™ Google-colab experiments: https://colab.research.google.com/github/nicolay-r/deep-book-processing/blob/master/parlai_gutenberg_experiments.ipynb ๐Ÿฆœ Task: https://github.com/nicolay-r/parlai_bookchar_task/tree/master
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64e62d11d27a8292c3637f86/aptDeBHpCJxcREj6KPLN1.jpeg", "fullname": "Nicolay Rusnachenko", "name": "nicolay-r", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 49, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64e62d11d27a8292c3637f86/8cT_8J6bxdY545UCdyqQY.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64e62d11d27a8292c3637f86/HTt57XSiVmJRRcLvseMWh.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64e62d11d27a8292c3637f86/KmSwR1DUlAnw_vvb7fSPY.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64e62d11d27a8292c3637f86/kGoZAxIjCavJNmbwThTfJ.png" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "bfuzzy1" ], "count": 2 }, { "reaction": "๐Ÿ”ฅ", "users": [ "aiisthebest" ], "count": 1 } ]
2024-09-25T15:57:45.000Z
2024-09-26T16:36:12.357Z
[ { "avatarUrl": "/avatars/16f5824b31a1de65ea12272ad990d932.svg", "fullname": "Rafael", "name": "aiisthebest", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64e62d11d27a8292c3637f86/aptDeBHpCJxcREj6KPLN1.jpeg", "fullname": "Nicolay Rusnachenko", "name": "nicolay-r", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 49, "isFollowing": false } ]
/posts/nicolay-r/592547965144202
1,022
2
671294053259219
[ { "type": "text", "value": "๐Ÿ™‹๐Ÿปโ€โ™‚๏ธ Hey there folks,", "raw": "๐Ÿ™‹๐Ÿปโ€โ™‚๏ธ Hey there folks,", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/stepfun-ai/GOT-OCR2_0", "href": null, "resource": { "type": "model", "id": "stepfun-ai/GOT-OCR2_0", "discussionNum": null }, "url": "https://huggingface.co/stepfun-ai/GOT-OCR2_0", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " is in top trending and spaces of the week for the second week straight !!", "raw": " is in top trending and spaces of the week for the second week straight !!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This is madness ๐Ÿ˜ฑ", "raw": "This is madness ๐Ÿ˜ฑ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿš€๐Ÿš€check out my demo here : ", "raw": "๐Ÿš€๐Ÿš€check out my demo here : ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/Tonic/GOT-OCR", "href": null, "resource": { "type": "space", "id": "Tonic/GOT-OCR", "discussionNum": null }, "url": "https://huggingface.co/spaces/Tonic/GOT-OCR", "code": null, "user": null, "label": null, "lang": null } ]
๐Ÿ™‹๐Ÿปโ€โ™‚๏ธ Hey there folks, https://huggingface.co/stepfun-ai/GOT-OCR2_0 is in top trending and spaces of the week for the second week straight !! This is madness ๐Ÿ˜ฑ ๐Ÿš€๐Ÿš€check out my demo here : https://huggingface.co/spaces/Tonic/GOT-OCR
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a3bb1cd0d8c2c2169f0b88/eT2TS0IlQbZtz-F_zHLz9.jpeg", "fullname": "Joseph [open/acc] Pollack", "name": "Tonic", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 313, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "bfuzzy1", "clem" ], "count": 3 }, { "reaction": "๐Ÿ”ฅ", "users": [ "clem" ], "count": 1 } ]
2024-09-25T13:49:13.000Z
2024-09-25T13:49:13.340Z
[]
/posts/Tonic/671294053259219
1,240
0
166468173773107
[ { "type": "text", "value": "Researchers from ", "raw": "Researchers from ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@GoogleDeepMind", "href": null, "resource": null, "url": null, "code": null, "user": "GoogleDeepMind", "label": null, "lang": null }, { "type": "text", "value": " have introduced \"Michelangelo\" โ€” a novel framework for evaluating large language models on long-context reasoning tasks beyond simple retrieval.", "raw": " have introduced \"Michelangelo\" โ€” a novel framework for evaluating large language models on long-context reasoning tasks beyond simple retrieval.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "They have proposed three minimal tasks to test different aspects of long-context reasoning:", "raw": "They have proposed three minimal tasks to test different aspects of long-context reasoning:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Latent List: Tracking a Python list's state over many operations.", "raw": "- Latent List: Tracking a Python list's state over many operations.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- MRCR: Multi-round coreference resolution in conversations.", "raw": "- MRCR: Multi-round coreference resolution in conversations.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- IDK: Determining if an answer exists in a long context.", "raw": "- IDK: Determining if an answer exists in a long context.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "They found significant performance drop-offs before 32K tokens on these tasks, indicating room for improvement in long-context reasoning.", "raw": "They found significant performance drop-offs before 32K tokens on these tasks, indicating room for improvement in long-context reasoning.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": 
null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Here are the key steps for creating the Michelangelo long-context evaluations:", "raw": "Here are the key steps for creating the Michelangelo long-context evaluations:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1. Develop the Latent Structure Queries (LSQ) framework:", "raw": "1. Develop the Latent Structure Queries (LSQ) framework:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Create a framework for generating long-context evaluations that can be extended arbitrarily in length and complexity.", "raw": "- Create a framework for generating long-context evaluations that can be extended arbitrarily in length and complexity.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Ensure the framework measures capabilities beyond simple retrieval.", "raw": "- Ensure the framework measures capabilities beyond simple retrieval.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "2. Design minimal tasks using the LSQ framework:", "raw": "2. 
Design minimal tasks using the LSQ framework:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Create tasks that test different aspects of long-context reasoning.", "raw": "- Create tasks that test different aspects of long-context reasoning.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Ensure tasks are minimally complex while still challenging for current models.", "raw": "- Ensure tasks are minimally complex while still challenging for current models.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "3. Implement the Latent List task:", "raw": "3. Implement the Latent List task:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Create a Python list-based task with operations that modify the list.", "raw": "- Create a Python list-based task with operations that modify the list.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Include relevant and irrelevant operations to test model understanding.", "raw": "- Include relevant and irrelevant operations to test model understanding.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Develop view operations to query the final state of the list.", "raw": "- Develop view operations to query the final state of the list.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "4. Implement the Multi-Round Coreference Resolution (MRCR) task:", "raw": "4. 
Implement the Multi-Round Coreference Resolution (MRCR) task:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Generate conversations with user requests and model responses on various topics.", "raw": "- Generate conversations with user requests and model responses on various topics.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Place specific requests randomly in the context.", "raw": "- Place specific requests randomly in the context.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Require models to reproduce outputs based on queries about the conversation.", "raw": "- Require models to reproduce outputs based on queries about the conversation.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "5. Implement the IDK task:", "raw": "5. 
Implement the IDK task:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Create contexts with invented stories or information.", "raw": "- Create contexts with invented stories or information.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Develop questions that may or may not have answers in the context.", "raw": "- Develop questions that may or may not have answers in the context.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Include multiple-choice options, always including \"I don't know\" as an option.", "raw": "- Include multiple-choice options, always including \"I don't know\" as an option.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "More in comments...", "raw": "More in comments...", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Researchers from @GoogleDeepMind have introduced "Michelangelo" - a novel framework for evaluating large language models on long-context reasoning tasks beyond simple retrieval. They have proposed three minimal tasks to test different aspects of long-context reasoning: - Latent List: Tracking a Python list's state over many operations. - MRCR: Multi-round coreference resolution in conversations. - IDK: Determining if an answer exists in a long context. They found significant performance drop-offs before 32K tokens on these tasks, indicating room for improvement in long-context reasoning. Here are the key steps for creating the Michelangelo long-context evaluations: 1. Develop the Latent Structure Queries (LSQ) framework: - Create a framework for generating long-context evaluations that can be extended arbitrarily in length and complexity. - Ensure the framework measures capabilities beyond simple retrieval. 2. Design minimal tasks using the LSQ framework: - Create tasks that test different aspects of long-context reasoning. - Ensure tasks are minimally complex while still challenging for current models. 3. Implement the Latent List task: - Create a Python list-based task with operations that modify the list. - Include relevant and irrelevant operations to test model understanding. - Develop view operations to query the final state of the list. 4. Implement the Multi-Round Coreference Resolution (MRCR) task: - Generate conversations with user requests and model responses on various topics. - Place specific requests randomly in the context. - Require models to reproduce outputs based on queries about the conversation. 5. Implement the IDK task: - Create contexts with invented stories or information. - Develop questions that may or may not have answers in the context. - Include multiple-choice options, always including "I don't know" as an option. More in comments...
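The Latent List task described above maps naturally onto a small synthetic-data generator. Below is a minimal Python sketch of how such an evaluation item could be constructed; the operation mix, value range, and question wording are illustrative assumptions, not the exact recipe used in the Michelangelo paper.

```python
import random

def make_latent_list_example(num_ops: int = 20, seed: int = 0) -> dict:
    """Build one illustrative Latent List item: a stream of list operations,
    a view-style question, and the ground-truth final state."""
    rng = random.Random(seed)
    state = []       # the latent list the model must track
    transcript = []  # the operations shown to the model as context

    for _ in range(num_ops):
        op = rng.choice(["append", "pop", "remove", "noise"])
        if op == "append":
            x = rng.randint(0, 9)
            state.append(x)
            transcript.append(f"my_list.append({x})")
        elif op == "pop" and state:
            state.pop()
            transcript.append("my_list.pop()")
        elif op == "remove" and state:
            x = rng.choice(state)
            state.remove(x)
            transcript.append(f"my_list.remove({x})")
        else:
            # an irrelevant operation that must not change the latent state
            transcript.append("print('checkpoint')")

    return {
        "context": "\n".join(transcript),
        "question": "What does print(my_list) output after all operations above?",
        "answer": str(state),
    }

example = make_latent_list_example()
print(example["context"])
print(example["question"], "->", example["answer"])
```

Padding the transcript with more irrelevant operations is one way to stretch such an item to arbitrary context lengths, which is the stated goal of the LSQ construction.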
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/662bf5bfe93bb73804ef9344/WXYLnjjJ4SROkoveIi7If.png", "fullname": "Kuldeep Singh Sidhu", "name": "singhsidhukuldeep", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 219, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/662bf5bfe93bb73804ef9344/s9z59hygDfL5Xyo6LFAfq.jpeg" } ]
[]
[ { "reaction": "๐Ÿง ", "users": [ "lunarflu", "John6666", "bfuzzy1", "louisbrulenaudet" ], "count": 4 }, { "reaction": "๐Ÿ”ฅ", "users": [ "ajibawa-2023", "bfuzzy1" ], "count": 2 }, { "reaction": "๐Ÿ˜Ž", "users": [ "pduf" ], "count": 1 } ]
2024-09-25T13:48:22.000Z
2024-09-25T13:48:53.419Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/662bf5bfe93bb73804ef9344/WXYLnjjJ4SROkoveIi7If.png", "fullname": "Kuldeep Singh Sidhu", "name": "singhsidhukuldeep", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 219, "isFollowing": false } ]
/posts/singhsidhukuldeep/166468173773107
1,471
1
524131396425284
[ { "type": "text", "value": "๐Ÿ“ I wrote a tutorial on how to get started with the fine-tuning process using Hugging Face tools, providing an end-to-end workflow. ", "raw": "๐Ÿ“ I wrote a tutorial on how to get started with the fine-tuning process using Hugging Face tools, providing an end-to-end workflow. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The tutorial covers creating a new dataset using the new SQL Console ๐Ÿ›ข and fine-tuning a model with SFT, guided by the Notebook Creator App ๐Ÿ“™.", "raw": "The tutorial covers creating a new dataset using the new SQL Console ๐Ÿ›ข and fine-tuning a model with SFT, guided by the Notebook Creator App ๐Ÿ“™.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ‘‰ You can read the full article here:", "raw": "๐Ÿ‘‰ You can read the full article here:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/blog/asoria/easy-fine-tuning-with-hf", "href": "https://huggingface.co/blog/asoria/easy-fine-tuning-with-hf", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/asoria/auto-notebook-creator", "href": null, "resource": { "type": "space", "id": "asoria/auto-notebook-creator", "discussionNum": null }, "url": "https://huggingface.co/spaces/asoria/auto-notebook-creator", "code": null, "user": null, "label": null, "lang": null } ]
📝 I wrote a tutorial on how to get started with the fine-tuning process using Hugging Face tools, providing an end-to-end workflow. The tutorial covers creating a new dataset using the new SQL Console 🛢 and fine-tuning a model with SFT, guided by the Notebook Creator App 📙. 👉 You can read the full article here: https://huggingface.co/blog/asoria/easy-fine-tuning-with-hf https://huggingface.co/spaces/asoria/auto-notebook-creator
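For a feel of the SFT step the tutorial walks through, here is a minimal sketch using TRL's SFTTrainer. The model and dataset ids are placeholders rather than the ones from the blog post, and argument names can differ slightly across TRL versions.

```python
from datasets import load_dataset
from trl import SFTConfig, SFTTrainer

# Placeholder dataset and model ids - swap in the ones built in the tutorial.
dataset = load_dataset("trl-lib/Capybara", split="train")

trainer = SFTTrainer(
    model="Qwen/Qwen2-0.5B-Instruct",   # a small model keeps the sketch cheap to run
    train_dataset=dataset,
    args=SFTConfig(output_dir="sft-output", num_train_epochs=1),
)
trainer.train()
```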
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1674055965173-noauth.jpeg", "fullname": "Andrea Soria", "name": "asoria", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 61, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘", "users": [ "lhoestq", "amosgyamfi", "jsulz", "John6666", "Csplk", "tosaddler", "bfuzzy1", "victor", "fffiloni", "davanstrien", "Salvor", "fsommers", "eleveny11" ], "count": 13 }, { "reaction": "๐Ÿ”ฅ", "users": [ "nazimali", "eleveny11" ], "count": 2 } ]
2024-09-25T11:50:19.000Z
2024-09-25T11:50:19.466Z
[]
/posts/asoria/524131396425284
2,365
0
705417318974122
[ { "type": "text", "value": "๐ŸŒŽ ๐“๐ก๐ž ๐Ÿ๐ข๐ซ๐ฌ๐ญ ๐ž๐ฏ๐ž๐ซ ๐…๐จ๐ฎ๐ง๐๐š๐ญ๐ข๐จ๐ง ๐ฐ๐ž๐š๐ญ๐ก๐ž๐ซ ๐ฆ๐จ๐๐ž๐ฅ: ๐๐ซ๐ข๐ญ๐ก๐ฏ๐ข ๐–๐ฑ๐‚ ๐ž๐ง๐š๐›๐ฅ๐ž๐ฌ ๐ฅ๐ข๐Ÿ๐ž-๐ฌ๐š๐ฏ๐ข๐ง๐  ๐ฐ๐ž๐š๐ญ๐ก๐ž๐ซ ๐ฉ๐ซ๐ž๐๐ข๐œ๐ญ๐ข๐จ๐ง๐ฌ", "raw": "๐ŸŒŽ ๐“๐ก๐ž ๐Ÿ๐ข๐ซ๐ฌ๐ญ ๐ž๐ฏ๐ž๐ซ ๐…๐จ๐ฎ๐ง๐๐š๐ญ๐ข๐จ๐ง ๐ฐ๐ž๐š๐ญ๐ก๐ž๐ซ ๐ฆ๐จ๐๐ž๐ฅ: ๐๐ซ๐ข๐ญ๐ก๐ฏ๐ข ๐–๐ฑ๐‚ ๐ž๐ง๐š๐›๐ฅ๐ž๐ฌ ๐ฅ๐ข๐Ÿ๐ž-๐ฌ๐š๐ฏ๐ข๐ง๐  ๐ฐ๐ž๐š๐ญ๐ก๐ž๐ซ ๐ฉ๐ซ๐ž๐๐ข๐œ๐ญ๐ข๐จ๐ง๐ฌ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Hurricane Katrina killed hundreds of people as it made landfall on New Orleans in 2005 - many of these deaths could have been avoided if alerts had been given one day earlier. Accurate weather forecasts are really life-saving.", "raw": "Hurricane Katrina killed hundreds of people as it made landfall on New Orleans in 2005 - many of these deaths could have been avoided if alerts had been given one day earlier. Accurate weather forecasts are really life-saving.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ”ฅย Now, NASA and IBM just dropped a game-changing new model: the first ever foundation model for weather! This means, it's the first time we have a generalist model not restricted to one task, but able to predict 160 weather variables!", "raw": "๐Ÿ”ฅย Now, NASA and IBM just dropped a game-changing new model: the first ever foundation model for weather! This means, it's the first time we have a generalist model not restricted to one task, but able to predict 160 weather variables!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Prithvi WxC (Prithvi, โ€œเคชเฅƒเคฅเฅเคตเฅ€โ€, is the Sanskrit name for Earth) - is a 2.3 billion parameter model, with an architecture close to previous vision transformers like Hiera. ", "raw": "Prithvi WxC (Prithvi, โ€œเคชเฅƒเคฅเฅเคตเฅ€โ€, is the Sanskrit name for Earth) - is a 2.3 billion parameter model, with an architecture close to previous vision transformers like Hiera. 
", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ’กย But it comes with some important tweaks: under the hood, Prithvi WxC uses a clever transformer-based architecture with 25 encoder and 5 decoder blocks. It alternates between \"local\" and \"global\" attention to capture both regional and global weather patterns.", "raw": "๐Ÿ’กย But it comes with some important tweaks: under the hood, Prithvi WxC uses a clever transformer-based architecture with 25 encoder and 5 decoder blocks. It alternates between \"local\" and \"global\" attention to capture both regional and global weather patterns.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐—ž๐—ฒ๐˜† ๐—ถ๐—ป๐˜€๐—ถ๐—ด๐—ต๐˜๐˜€:", "raw": "๐—ž๐—ฒ๐˜† ๐—ถ๐—ป๐˜€๐—ถ๐—ด๐—ต๐˜๐˜€:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ”ฎ Nails short-term forecasts - Prithvi WxC crushed it on 6-12 hour predictions, even outperforming some traditional numerical weather models", "raw": "๐Ÿ”ฎ Nails short-term forecasts - Prithvi WxC crushed it on 6-12 hour predictions, even outperforming some traditional numerical weather models", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐ŸŒ€ Tracks hurricanes like a champ - For Hurricane Ida, it predicted the landfall location within 5 km (vs 20+ km errors from other AI models), which is a huge progress!", "raw": "๐ŸŒ€ Tracks hurricanes like a champ - For Hurricane Ida, it predicted the landfall location within 5 km (vs 20+ km errors from other AI models), which is a huge progress!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ” 6x downscaling power - Can zoom in on weather data to 6x higher resolution with 4x lower error than basic methods", "raw": "๐Ÿ” 6x downscaling power - Can zoom in on weather data to 6x higher resolution with 4x lower error than basic methods", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": 
null }, { "type": "text", "value": "๐ŸŒŠ Models elusive gravity waves - Accurately simulates these crucial but hard-to-capture atmospheric oscillations", "raw": "๐ŸŒŠ Models elusive gravity waves - Accurately simulates these crucial but hard-to-capture atmospheric oscillations", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "As climate change intensifies, tools like Prithvi WxC will become more and more crucial to avoid disasters!", "raw": "As climate change intensifies, tools like Prithvi WxC will become more and more crucial to avoid disasters!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Announcement post ๐Ÿ‘‰ ", "raw": "Announcement post ๐Ÿ‘‰ ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://newsroom.ibm.com/2024-09-23-ibm-and-nasa-release-open-source-ai-model-on-hugging-face-for-weather-and-climate-applications", "href": "https://newsroom.ibm.com/2024-09-23-ibm-and-nasa-release-open-source-ai-model-on-hugging-face-for-weather-and-climate-applications", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Model on the Hub ๐Ÿ‘‰ ", "raw": "Model on the Hub ๐Ÿ‘‰ ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/Prithvi-WxC", "href": "https://huggingface.co/Prithvi-WxC", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Thank you ", "raw": "Thank you ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@clem", "href": null, "resource": null, "url": null, "code": null, "user": "clem", "label": null, "lang": null }, { "type": "text", "value": " for highlighting it!", "raw": " for highlighting it!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
🌎 The first ever Foundation weather model: Prithvi WxC enables life-saving weather predictions Hurricane Katrina killed hundreds of people as it made landfall near New Orleans in 2005 - many of these deaths could have been avoided if alerts had been given one day earlier. Accurate weather forecasts are really life-saving. 🔥 Now, NASA and IBM just dropped a game-changing new model: the first ever foundation model for weather! This means it's the first time we have a generalist model not restricted to one task, but able to predict 160 weather variables! Prithvi WxC (Prithvi, “पृथ्वी”, is the Sanskrit name for Earth) is a 2.3-billion-parameter model, with an architecture close to previous vision transformers like Hiera. 💡 But it comes with some important tweaks: under the hood, Prithvi WxC uses a clever transformer-based architecture with 25 encoder and 5 decoder blocks. It alternates between "local" and "global" attention to capture both regional and global weather patterns. Key insights: 🔮 Nails short-term forecasts - Prithvi WxC crushed it on 6-12 hour predictions, even outperforming some traditional numerical weather models 🌀 Tracks hurricanes like a champ - For Hurricane Ida, it predicted the landfall location within 5 km (vs 20+ km errors from other AI models), which is huge progress! 🔍 6x downscaling power - Can zoom in on weather data to 6x higher resolution with 4x lower error than basic methods 🌊 Models elusive gravity waves - Accurately simulates these crucial but hard-to-capture atmospheric oscillations As climate change intensifies, tools like Prithvi WxC will become more and more crucial to avoid disasters! Announcement post 👉 https://newsroom.ibm.com/2024-09-23-ibm-and-nasa-release-open-source-ai-model-on-hugging-face-for-weather-and-climate-applications Model on the Hub 👉 https://huggingface.co/Prithvi-WxC Thank you @clem for highlighting it!
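The local/global alternation described above can be pictured with a toy sketch: even-numbered layers let tokens attend within their own spatial window, odd-numbered layers let tokens at the same position attend across windows. The dimensions, depth, and head count below are illustrative only, not the real Prithvi WxC configuration.

```python
import torch
import torch.nn as nn

class AlternatingAttention(nn.Module):
    """Toy sketch of alternating local/global attention over windowed tokens.
    Input shape: (batch, num_windows, window_size, dim)."""

    def __init__(self, dim: int = 64, heads: int = 4, depth: int = 4):
        super().__init__()
        self.layers = nn.ModuleList(
            [nn.MultiheadAttention(dim, heads, batch_first=True) for _ in range(depth)]
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        b, w, s, d = x.shape
        for i, attn in enumerate(self.layers):
            if i % 2 == 0:  # "local": tokens attend within their own window
                t = x.reshape(b * w, s, d)
            else:           # "global": the same position attends across windows
                t = x.transpose(1, 2).reshape(b * s, w, d)
            t, _ = attn(t, t, t)
            if i % 2 == 0:
                x = t.reshape(b, w, s, d)
            else:
                x = t.reshape(b, s, w, d).transpose(1, 2)
        return x

x = torch.randn(2, 8, 16, 64)            # batch of 2, 8 windows of 16 tokens each
print(AlternatingAttention()(x).shape)   # torch.Size([2, 8, 16, 64])
```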
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63d10d4e8eaa4831005e92b5/7p7-OmWM6PqqCs7ZStPGD.jpeg", "fullname": "Aymeric Roucher", "name": "m-ric", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 494, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/63d10d4e8eaa4831005e92b5/MiPW0IsXtmedJ5OcuAafY.png" } ]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1583857146757-5e67bdd61009063689407479.jpeg", "fullname": "Clem ๐Ÿค—", "name": "clem", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 1763 } ]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "KingNish", "YaTharThShaRma999", "Tonic", "Michielo", "John6666", "clem", "p3nGu1nZz", "bfuzzy1", "drguilhermeapolinario", "davanstrien", "Hanyu66", "louisbrulenaudet" ], "count": 12 }, { "reaction": "๐Ÿค—", "users": [ "venkata1995", "Tonic", "Michielo", "clem", "vutrungduc7593", "bfuzzy1", "ummmmisthaterrie" ], "count": 7 }, { "reaction": "๐Ÿ‘€", "users": [ "tazztone", "bfuzzy1" ], "count": 2 } ]
2024-09-25T09:22:55.000Z
2024-09-25T09:22:55.988Z
[]
/posts/m-ric/705417318974122
3,214
0
719640137244278
[ { "type": "mention", "value": null, "raw": "@mariagrandury", "href": null, "resource": null, "url": null, "code": null, "user": "mariagrandury", "label": null, "lang": null }, { "type": "text", "value": " (SomosNLP) and team releases the Spanish leaderboard !!!", "raw": " (SomosNLP) and team releases the Spanish leaderboard !!!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "It is impressive how they choosed to design this leaderboard and how it support 4 languages (all part of Spain ofc).", "raw": "It is impressive how they choosed to design this leaderboard and how it support 4 languages (all part of Spain ofc).", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Check it out from this link :", "raw": "Check it out from this link :", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/la-leaderboard/la-leaderboard", "href": null, "resource": { "type": "space", "id": "la-leaderboard/la-leaderboard", "discussionNum": null }, "url": "https://huggingface.co/spaces/la-leaderboard/la-leaderboard", "code": null, "user": null, "label": null, "lang": null } ]
@mariagrandury (SomosNLP) and team release the Spanish leaderboard !!! It is impressive how they chose to design this leaderboard and how it supports 4 languages (all part of Spain ofc). Check it out from this link: https://huggingface.co/spaces/la-leaderboard/la-leaderboard
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/626237d9bbcbd1c34f1bb231/EJrOjvAL-68qMCYdnvOrq.png", "fullname": "Ali El Filali", "name": "alielfilali01", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 186, "isFollowing": false }
[]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1665073337782-5f9c00a5777efc07d7f1e4be.png", "fullname": "Marรญa Grandury", "name": "mariagrandury", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2283 } ]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "bfuzzy1", "mariagrandury" ], "count": 3 }, { "reaction": "โค๏ธ", "users": [ "mariagrandury", "Danilkarat" ], "count": 2 } ]
2024-09-25T05:56:17.000Z
2024-10-03T15:15:56.859Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1665073337782-5f9c00a5777efc07d7f1e4be.png", "fullname": "Marรญa Grandury", "name": "mariagrandury", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2283, "isFollowing": false } ]
/posts/alielfilali01/719640137244278
1,195
1
568770637677491
[ { "type": "text", "value": "In August, the XetHub team joined Hugging Face ", "raw": "In August, the XetHub team joined Hugging Face ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- ", "raw": "- ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/blog/xethub-joins-hf", "href": "https://huggingface.co/blog/xethub-joins-hf", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " - and weโ€™ve been rolling up our sleeves to bring the best of both worlds together. We started with a deep dive into the current state of files stored with Git LFS on the Hub.", "raw": " - and weโ€™ve been rolling up our sleeves to bring the best of both worlds together. We started with a deep dive into the current state of files stored with Git LFS on the Hub.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Getting this information was no small feat. We had to:", "raw": "Getting this information was no small feat. We had to:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "* Analyze a complete database dump of all repositories and files stored in Git LFS across Hugging Face.", "raw": "* Analyze a complete database dump of all repositories and files stored in Git LFS across Hugging Face.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "* Parse through metadata on file sizes and types to accurately map the storage breakdown across Spaces, Models, and Datasets.", "raw": "* Parse through metadata on file sizes and types to accurately map the storage breakdown across Spaces, Models, and Datasets.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "You can read more about the findings (with some jaw-dropping stats + charts) here ", "raw": "You can read more about the findings (with some jaw-dropping stats + charts) here ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": 
"https://www.linkedin.com/feed/update/urn:li:activity:7244486280351285248", "href": "https://www.linkedin.com/feed/update/urn:li:activity:7244486280351285248", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
In August, the XetHub team joined Hugging Face - https://huggingface.co/blog/xethub-joins-hf - and we've been rolling up our sleeves to bring the best of both worlds together. We started with a deep dive into the current state of files stored with Git LFS on the Hub. Getting this information was no small feat. We had to: * Analyze a complete database dump of all repositories and files stored in Git LFS across Hugging Face. * Parse through metadata on file sizes and types to accurately map the storage breakdown across Spaces, Models, and Datasets. You can read more about the findings (with some jaw-dropping stats + charts) here https://www.linkedin.com/feed/update/urn:li:activity:7244486280351285248
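The storage breakdown described above is essentially a group-by over the LFS file metadata. A hedged pandas sketch with a made-up schema (the real dump's column names are not public):

```python
import pandas as pd

# Hypothetical schema for the LFS metadata dump: one row per stored file.
df = pd.DataFrame(
    {
        "repo_type": ["model", "model", "dataset", "space"],
        "extension": [".safetensors", ".bin", ".parquet", ".pkl"],
        "size_bytes": [5_000_000_000, 3_200_000_000, 750_000_000, 12_000_000],
    }
)

# Total storage per repo type, and the heaviest file extensions overall.
per_type = df.groupby("repo_type")["size_bytes"].sum().sort_values(ascending=False)
per_ext = df.groupby("extension")["size_bytes"].sum().sort_values(ascending=False)

print(per_type / 1e9)        # GB per repo type
print(per_ext.head(10) / 1e9)
```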
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65d50e9ef9cbfa798c590004/FlVe8chafigMfrPpMeJRL.jpeg", "fullname": "Jared Sulzdorf", "name": "jsulz", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 47, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿš€", "users": [ "John6666", "davidberenstein1957", "YaTharThShaRma999", "erinys", "adorkin", "fffiloni", "davanstrien", "lhoestq", "julien-c", "celinah", "ArthurZ", "veeraleto", "ZennyKenny" ], "count": 13 } ]
2024-09-24T23:44:39.000Z
2024-09-27T14:47:39.125Z
[]
/posts/jsulz/568770637677491
1,961
1
440740113386646
[ { "type": "text", "value": "Here is my latest study on OpenAI๐Ÿ“o1๐Ÿ“.", "raw": "Here is my latest study on OpenAI๐Ÿ“o1๐Ÿ“.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2409.13773", "href": null, "resource": { "type": "paper", "id": "2409.13773", "discussionNum": null }, "url": "https://huggingface.co/papers/2409.13773", "code": null, "user": null, "label": "A Case Study of Web App Coding with OpenAI Reasoning Models (2409.13773)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I wrote an easy-to-read blogpost to explain finding.", "raw": "I wrote an easy-to-read blogpost to explain finding.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/blog/onekq/daily-software-engineering-work-reasoning-models", "href": "https://huggingface.co/blog/onekq/daily-software-engineering-work-reasoning-models", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "INSTRUCTION FOLLOWING is the key.", "raw": "INSTRUCTION FOLLOWING is the key.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "100% instruction following + Reasoning = new SOTA", "raw": "100% instruction following + Reasoning = new SOTA", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "But if the model misses or misunderstands one instruction, it can perform far worse than non-reasoning models.", "raw": "But if the model misses or misunderstands one instruction, it can perform far worse than non-reasoning models.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Here is my latest study on OpenAI o1. https://huggingface.co/papers/2409.13773 I wrote an easy-to-read blog post to explain the findings. https://huggingface.co/blog/onekq/daily-software-engineering-work-reasoning-models INSTRUCTION FOLLOWING is the key. 100% instruction following + Reasoning = new SOTA. But if the model misses or misunderstands one instruction, it can perform far worse than non-reasoning models.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/669dbd709a4bf63e08f1ddc2/aV10ZJPPzH5LbnHFZNqc7.png", "fullname": "Yi Cui", "name": "onekq", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 16, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿง ", "users": [ "YaTharThShaRma999", "victor", "Tanvir1337", "John6666", "onekq", "damerajee", "nDimensional", "nicoism", "DiamanteAmarelo" ], "count": 9 }, { "reaction": "๐Ÿ‘", "users": [ "Winnougan", "fakerbaby" ], "count": 2 }, { "reaction": "๐Ÿ”ฅ", "users": [ "Svngoku" ], "count": 1 } ]
2024-09-24T18:25:40.000Z
2024-09-24T18:25:40.196Z
[]
/posts/onekq/440740113386646
2,550
0
177229373246727
[ { "type": "text", "value": "Some interesting findings in this paper: ", "raw": "Some interesting findings in this paper: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- They consider o1 a Large Reasoning Model (LRM) with a different arch from SOTA LLMs.", "raw": "- They consider o1 a Large Reasoning Model (LRM) with a different arch from SOTA LLMs.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Creative justifications: โ€œIt is almost as if o1 has gone from hallucinating to gaslighting!โ€. This is so true, I noticed also it can โ€œhallucinateโ€ its chain-of-thoughts lol. ", "raw": "- Creative justifications: โ€œIt is almost as if o1 has gone from hallucinating to gaslighting!โ€. This is so true, I noticed also it can โ€œhallucinateโ€ its chain-of-thoughts lol. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Accuracy/Cost Tradeoffs: o1 provides high accuracy but at significant computational and monetary costs due to hidden \"reasoning tokens.\"", "raw": "- Accuracy/Cost Tradeoffs: o1 provides high accuracy but at significant computational and monetary costs due to hidden \"reasoning tokens.\"", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Paper: ", "raw": "Paper: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.arxiv.org/abs/2409.13373", "href": "https://www.arxiv.org/abs/2409.13373", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Some interesting findings in this paper: - They consider o1 a Large Reasoning Model (LRM) with a different arch from SOTA LLMs. - Creative justifications: "It is almost as if o1 has gone from hallucinating to gaslighting!". This is so true; I also noticed it can "hallucinate" its chain-of-thoughts lol. - Accuracy/Cost Tradeoffs: o1 provides high accuracy but at significant computational and monetary costs due to hidden "reasoning tokens." Paper: https://www.arxiv.org/abs/2409.13373
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6438a9027de34e8ea7e4b257/vib8QSd1AWMr_bR9ig_xJ.jpeg", "fullname": "Jaward Sesay", "name": "Jaward", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 191, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/n5LGKNUnBBgEJPvr299G7.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/f4jCd29drYqiyKxJZATMG.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/GBNgsnxpVmzoCQ_3w301_.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/9GxjUnw9wr_h0SV9rUxmj.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/cD0k2sOITrla9cYdXUZuf.jpeg" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "victor", "John6666", "VinitT" ], "count": 3 } ]
2024-09-24T14:27:13.000Z
2024-09-24T14:27:31.888Z
[]
/posts/Jaward/177229373246727
1,340
0
895352115971302
[ { "type": "text", "value": "Streaming Text-to-Speech Chat Demo (CPU Inference Client)", "raw": "Streaming Text-to-Speech Chat Demo (CPU Inference Client)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/Akjava/mistral-7b-v0.3-matcha-tts-en", "href": null, "resource": { "type": "space", "id": "Akjava/mistral-7b-v0.3-matcha-tts-en", "discussionNum": null }, "url": "https://huggingface.co/spaces/Akjava/mistral-7b-v0.3-matcha-tts-en", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Please be patient, as it may take over a minute to load the ONNX model.", "raw": "Please be patient, as it may take over a minute to load the ONNX model.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This demo utilizes an inference client, which may occasionally become unresponsive.", "raw": "This demo utilizes an inference client, which may occasionally become unresponsive.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Streaming Text-to-Speech Chat Demo (CPU Inference Client) https://huggingface.co/spaces/Akjava/mistral-7b-v0.3-matcha-tts-en Please be patient, as it may take over a minute to load the ONNX model. This demo utilizes an inference client, which may occasionally become unresponsive.
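The "inference client" part of the demo refers to the text-generation side being served remotely while speech synthesis runs locally. A minimal sketch of that text side with huggingface_hub's InferenceClient, streaming chunks so TTS can start speaking early; the model id is an assumption based on the Space name, and availability depends on the hosted inference API.

```python
from huggingface_hub import InferenceClient

# Assumed model id (based on the Space name); any hosted chat model works here.
client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")

# Stream the reply chunk by chunk so a TTS engine can begin synthesis early.
for chunk in client.chat_completion(
    messages=[{"role": "user", "content": "Tell me a short story."}],
    max_tokens=128,
    stream=True,
):
    print(chunk.choices[0].delta.content or "", end="", flush=True)
```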
{ "avatarUrl": "/avatars/fb866e3758189d70488fc6a879151f45.svg", "fullname": "Akihito Miyazaki", "name": "Akjava", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 4, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿš€", "users": [ "John6666", "sghimire", "victor" ], "count": 3 }, { "reaction": "๐Ÿ”ฅ", "users": [ "sghimire", "woody23" ], "count": 2 } ]
2024-09-24T12:53:57.000Z
2024-09-24T12:53:57.956Z
[]
/posts/Akjava/895352115971302
1,416
0
636612660877076
[ { "type": "text", "value": "Yesterday, I shared a blog post on generating data for fine-tuning ColPali using the ", "raw": "Yesterday, I shared a blog post on generating data for fine-tuning ColPali using the ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/Qwen/Qwen2-VL-7B-Instruct", "href": null, "resource": { "type": "model", "id": "Qwen/Qwen2-VL-7B-Instruct", "discussionNum": null }, "url": "https://huggingface.co/Qwen/Qwen2-VL-7B-Instruct", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " model.", "raw": " model.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "To simplify testing this approach, I created a Space that lets you generate queries from an input document page image: ", "raw": "To simplify testing this approach, I created a Space that lets you generate queries from an input document page image: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/davanstrien/ColPali-Query-Generator", "href": null, "resource": { "type": "space", "id": "davanstrien/ColPali-Query-Generator", "discussionNum": null }, "url": "https://huggingface.co/spaces/davanstrien/ColPali-Query-Generator", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I think there is much room for improvement, but I'm excited about the potential for relatively small VLMs to create synthetic data.", "raw": "I think there is much room for improvement, but I'm excited about the potential for relatively small VLMs to create synthetic data.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "You can read the original blog post that goes into more detail here: ", "raw": "You can read the original blog post that goes into more detail here: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://danielvanstrien.xyz/posts/post-with-code/colpali/2024-09-23-generate_colpali_dataset.html", "href": "https://danielvanstrien.xyz/posts/post-with-code/colpali/2024-09-23-generate_colpali_dataset.html", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Yesterday, I shared a blog post on generating data for fine-tuning ColPali using the https://huggingface.co/Qwen/Qwen2-VL-7B-Instruct model. To simplify testing this approach, I created a Space that lets you generate queries from an input document page image: https://huggingface.co/spaces/davanstrien/ColPali-Query-Generator I think there is much room for improvement, but I'm excited about the potential for relatively small VLMs to create synthetic data. You can read the original blog post that goes into more detail here: https://danielvanstrien.xyz/posts/post-with-code/colpali/2024-09-23-generate_colpali_dataset.html
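As a rough illustration of the generation step, here is a hedged sketch of prompting Qwen2-VL via transformers to propose retrieval queries for one page image; the prompt wording and decoding settings are placeholders, and the blog post has the actual recipe.

```python
from transformers import AutoProcessor, Qwen2VLForConditionalGeneration
from PIL import Image

model_id = "Qwen/Qwen2-VL-7B-Instruct"
model = Qwen2VLForConditionalGeneration.from_pretrained(model_id, torch_dtype="auto", device_map="auto")
processor = AutoProcessor.from_pretrained(model_id)

image = Image.open("page.png")  # one document page image
messages = [{
    "role": "user",
    "content": [
        {"type": "image"},
        {"type": "text", "text": "Write three search queries a user might type to retrieve this page."},
    ],
}]

prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
inputs = processor(text=[prompt], images=[image], return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=200)
print(processor.batch_decode(output[:, inputs.input_ids.shape[1]:], skip_special_tokens=True)[0])
```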
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1627505688463-60107b385ac3e86b3ea4fc34.jpeg", "fullname": "Daniel van Strien", "name": "davanstrien", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 410, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/60107b385ac3e86b3ea4fc34/qx43deJHsjLT56rMytl98.png" } ]
[]
[ { "reaction": "โž•", "users": [ "blooket", "John6666", "AtAndDev", "victor", "fsommers" ], "count": 5 }, { "reaction": "โค๏ธ", "users": [ "louisbrulenaudet", "AtAndDev", "Csplk", "johko" ], "count": 4 }, { "reaction": "๐Ÿ‘", "users": [ "fsommers", "UniLLMer" ], "count": 2 }, { "reaction": "๐Ÿš€", "users": [ "AtAndDev" ], "count": 1 } ]
2024-09-24T12:35:07.000Z
2024-09-24T13:42:40.758Z
[]
/posts/davanstrien/636612660877076
2,182
0
875802394417024
[ { "type": "text", "value": "We have published an excellent paper for Arabic CLIP model.", "raw": "We have published an excellent paper for Arabic CLIP model.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Paper link:", "raw": "Paper link:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://aclanthology.org/2024.arabicnlp-1.9/", "href": "https://aclanthology.org/2024.arabicnlp-1.9/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "More information in this website:", "raw": "More information in this website:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://arabic-clip.github.io/Arabic-CLIP/", "href": "https://arabic-clip.github.io/Arabic-CLIP/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "All datasets, models, and demo are published to Huggingface:", "raw": "All datasets, models, and demo are published to Huggingface:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/Arabic-Clip", "href": "https://huggingface.co/Arabic-Clip", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The codes are published to github:", "raw": "The codes are published to github:", 
"href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/Arabic-Clip/Arabic-CLIP", "href": "https://github.com/Arabic-Clip/Arabic-CLIP", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
We have published an excellent paper on the Arabic CLIP model. Paper link: https://aclanthology.org/2024.arabicnlp-1.9/ More information on this website: https://arabic-clip.github.io/Arabic-CLIP/ All datasets, models, and the demo are published on Hugging Face: https://huggingface.co/Arabic-Clip The code is published on GitHub: https://github.com/Arabic-Clip/Arabic-CLIP
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/61934cc71832e6ac3837d8b0/y5d8VCVsQPQFnYM3BT-ew.jpeg", "fullname": "Muhammad Al-Barham", "name": "pain", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 19, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/61934cc71832e6ac3837d8b0/pbbrfmJXyMfiOrbzUlFUx.png" } ]
[]
[ { "reaction": "โค๏ธ", "users": [ "doddymee", "monsoon-nlp", "xi0v", "davanstrien", "victor", "pain", "not-lain" ], "count": 7 }, { "reaction": "๐Ÿ‘€", "users": [ "John6666", "xi0v" ], "count": 2 }, { "reaction": "๐Ÿš€", "users": [ "xi0v", "manoumhd99" ], "count": 2 }, { "reaction": "๐Ÿ”ฅ", "users": [ "xi0v" ], "count": 1 } ]
2024-09-23T22:15:56.000Z
2024-09-24T14:34:54.011Z
[ { "avatarUrl": "/avatars/591252948eb38a09b9907239ceaca520.svg", "fullname": "Mishl", "name": "mishl", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1, "isFollowing": false } ]
/posts/pain/875802394417024
2,428
1
363488600632163
[ { "type": "text", "value": "Keep track of the latest 3D releases with this space๐Ÿ‘‰ ", "raw": "Keep track of the latest 3D releases with this space๐Ÿ‘‰ ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/dylanebert/research-tracker", "href": null, "resource": { "type": "space", "id": "dylanebert/research-tracker", "discussionNum": null }, "url": "https://huggingface.co/spaces/dylanebert/research-tracker", "code": null, "user": null, "label": null, "lang": null } ]
Keep track of the latest 3D releases with this space 👉 https://huggingface.co/spaces/dylanebert/research-tracker
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1672164046414-624b4a964056e2a6914a05c5.png", "fullname": "Dylan Ebert", "name": "dylanebert", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 1764, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/624b4a964056e2a6914a05c5/r7NYqrpHYoGjE7YEd0F68.png" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "iacc", "Csplk", "DavidVivancos", "aiisthebest", "victor" ], "count": 6 }, { "reaction": "โค๏ธ", "users": [ "aiisthebest", "clem", "not-lain", "akashkuttappa" ], "count": 4 }, { "reaction": "๐Ÿ”ฅ", "users": [ "aiisthebest" ], "count": 1 }, { "reaction": "๐Ÿš€", "users": [ "aiisthebest" ], "count": 1 } ]
2024-09-23T21:07:41.000Z
2024-10-15T17:19:36.051Z
[ { "avatarUrl": "/avatars/58d293748c9ecc9195ae09b2c1c9bc4a.svg", "fullname": "m", "name": "melo513", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1672164046414-624b4a964056e2a6914a05c5.png", "fullname": "Dylan Ebert", "name": "dylanebert", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 1764, "isFollowing": false } ]
/posts/dylanebert/363488600632163
2,713
2
916541472058940
[ { "type": "text", "value": "๐Ÿ“ข It is less meaningful to prompt LLM directly for opinion mining. Instead, the ", "raw": "๐Ÿ“ข It is less meaningful to prompt LLM directly for opinion mining. Instead, the ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Three-hop (๐Ÿ’กaspect + ๐Ÿค”opinion + ๐Ÿง reason) Chain-of-Thought reasoning concept ", "raw": "Three-hop (๐Ÿ’กaspect + ๐Ÿค”opinion + ๐Ÿง reason) Chain-of-Thought reasoning concept ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "represent a decent solution for reason sentiments. ", "raw": "represent a decent solution for reason sentiments. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "After a series of the related posts here on huggingface, I am happy to invite you ๐Ÿ™Œ on my talk @ NLPSummit2024. ", "raw": "After a series of the related posts here on huggingface, I am happy to invite you ๐Ÿ™Œ on my talk @ NLPSummit2024. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I am going to take part of the Healthcare Day 2 (25th of September) with the calendar details and ๐Ÿ–‡ link to the event below ๐Ÿ‘‡ ", "raw": "I am going to take part of the Healthcare Day 2 (25th of September) with the calendar details and ๐Ÿ–‡ link to the event below ๐Ÿ‘‡ ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐ŸŽค Event: ", "raw": "๐ŸŽค Event: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.nlpsummit.org/nlp-summit-2024/", "href": "https://www.nlpsummit.org/nlp-summit-2024/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ“… Calendar event: ", "raw": "๐Ÿ“… Calendar event: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": 
"https://calendar.app.google/f7AUhNHuTw5JtPs36", "href": "https://calendar.app.google/f7AUhNHuTw5JtPs36", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โฒ Time: 25th of September @ 2:00 PM ET โ€“ 2:30 PM ET", "raw": "โฒ Time: 25th of September @ 2:00 PM ET โ€“ 2:30 PM ET", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ“Š Benchmark: ", "raw": "๐Ÿ“Š Benchmark: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/nicolay-r/RuSentNE-LLM-Benchmark", "href": "https://github.com/nicolay-r/RuSentNE-LLM-Benchmark", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿง  Framework: ", "raw": "๐Ÿง  Framework: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/nicolay-r/Reasoning-for-Sentiment-Analysis-Framework", "href": "https://github.com/nicolay-r/Reasoning-for-Sentiment-Analysis-Framework", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ“ Paper: ", "raw": "๐Ÿ“ Paper: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2404.12342", "href": null, "resource": { "type": "paper", "id": "2404.12342", "discussionNum": null }, "url": "https://huggingface.co/papers/2404.12342", "code": null, "user": null, "label": "Large Language Models in Targeted Sentiment Analysis (2404.12342)", "lang": null } ]
๐Ÿ“ข It is less meaningful to prompt an LLM directly for opinion mining. Instead, the Three-hop (๐Ÿ’กaspect + ๐Ÿค”opinion + ๐Ÿง reason) Chain-of-Thought reasoning concept represents a decent solution for sentiment reasoning. After a series of related posts here on Hugging Face, I am happy to invite you ๐Ÿ™Œ to my talk @ NLPSummit2024. I am going to take part in Healthcare Day 2 (25th of September); the calendar details and ๐Ÿ–‡ link to the event are below ๐Ÿ‘‡ ๐ŸŽค Event: https://www.nlpsummit.org/nlp-summit-2024/ ๐Ÿ“… Calendar event: https://calendar.app.google/f7AUhNHuTw5JtPs36 โฒ Time: 25th of September @ 2:00 PM ET โ€“ 2:30 PM ET ๐Ÿ“Š Benchmark: https://github.com/nicolay-r/RuSentNE-LLM-Benchmark ๐Ÿง  Framework: https://github.com/nicolay-r/Reasoning-for-Sentiment-Analysis-Framework ๐Ÿ“ Paper: https://huggingface.co/papers/2404.12342
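To make the three-hop idea concrete, here is a minimal sketch, assuming a generic `ask` helper that wraps whatever LLM backend you use; the prompt wording and the `three_hop_sentiment` helper are illustrative and are not the exact templates from the paper or framework linked above.

```python
# Hypothetical three-hop chain-of-thought sketch: aspect -> opinion -> polarity.
def ask(prompt: str) -> str:
    # Placeholder: replace with a call to your LLM of choice.
    raise NotImplementedError("plug in an LLM call here")

def three_hop_sentiment(text: str, entity: str) -> str:
    # Hop 1: identify which aspect of the entity the text discusses.
    aspect = ask(f"Text: {text}\nWhich aspect of {entity} does the text discuss?")
    # Hop 2: extract the opinion expressed about that aspect.
    opinion = ask(
        f"Text: {text}\nAspect: {aspect}\n"
        f"What opinion does the text express about {entity} regarding this aspect?"
    )
    # Hop 3: reason from aspect + opinion to a final sentiment label.
    return ask(
        f"Text: {text}\nAspect: {aspect}\nOpinion: {opinion}\n"
        f"Given the aspect and opinion, is the sentiment towards {entity} "
        f"positive, negative, or neutral? Answer with one word."
    )
```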
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64e62d11d27a8292c3637f86/aptDeBHpCJxcREj6KPLN1.jpeg", "fullname": "Nicolay Rusnachenko", "name": "nicolay-r", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 49, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64e62d11d27a8292c3637f86/2ZjQkLGiN0LyLrZfPvFyg.jpeg" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-09-23T19:59:43.000Z
2024-09-23T20:00:38.253Z
[]
/posts/nicolay-r/916541472058940
665
0
845850475427869
[ { "type": "text", "value": "Although this might sound like another way to make money on LLM API calls...", "raw": "Although this might sound like another way to make money on LLM API calls...", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Good folks at ", "raw": "Good folks at ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@AnthropicAI", "href": null, "resource": null, "url": null, "code": null, "user": "AnthropicAI", "label": null, "lang": null }, { "type": "text", "value": " just introduced Contextual Retrieval, and it's a significant yet logical step up from simple Retrieval-Augmented Generation (RAG)!", "raw": " just introduced Contextual Retrieval, and it's a significant yet logical step up from simple Retrieval-Augmented Generation (RAG)!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Here are the steps to implement Contextual Retrieval based on Anthropic's approach:", "raw": "Here are the steps to implement Contextual Retrieval based on Anthropic's approach:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1. Preprocess the knowledge base:", "raw": "1. 
Preprocess the knowledge base:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " - Break down documents into smaller chunks (typically a few hundred tokens each).", "raw": " - Break down documents into smaller chunks (typically a few hundred tokens each).", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " - Generate contextual information for each chunk using Claude 3 Haiku with a specific prompt.", "raw": " - Generate contextual information for each chunk using Claude 3 Haiku with a specific prompt.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " - Prepend the generated context (usually 50-100 tokens) to each chunk.", "raw": " - Prepend the generated context (usually 50-100 tokens) to each chunk.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "2. Create embeddings and a BM25 index:", "raw": "2. Create embeddings and a BM25 index:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " - Use an embedding model (Gemini or Voyage recommended) to convert contextualized chunks into vector embeddings.", "raw": " - Use an embedding model (Gemini or Voyage recommended) to convert contextualized chunks into vector embeddings.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " - Create a BM25 index using the contextualized chunks.", "raw": " - Create a BM25 index using the contextualized chunks.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "3. Set up the retrieval process:", "raw": "3. 
Set up the retrieval process:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " - Implement a system to search both the vector embeddings and the BM25 index.", "raw": " - Implement a system to search both the vector embeddings and the BM25 index.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " - Use rank fusion techniques to combine and deduplicate results from both searches.", "raw": " - Use rank fusion techniques to combine and deduplicate results from both searches.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "4. Implement reranking (optional but recommended):", "raw": "4. Implement reranking (optional but recommended):", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " - Retrieve the top 150 potentially relevant chunks initially.", "raw": " - Retrieve the top 150 potentially relevant chunks initially.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " - Use a reranking model (e.g., Cohere reranker) to score these chunks based on relevance to the query.", "raw": " - Use a reranking model (e.g., Cohere reranker) to score these chunks based on relevance to the query.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " - Select the top 20 chunks after reranking.", "raw": " - Select the top 20 chunks after reranking.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "5. Integrate with the generative model:", "raw": "5. 
Integrate with the generative model:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " - Add the top 20 chunks (or top K, based on your specific needs) to the prompt sent to the generative model.", "raw": " - Add the top 20 chunks (or top K, based on your specific needs) to the prompt sent to the generative model.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "6. Optimize for your use case:", "raw": "6. Optimize for your use case:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " - Experiment with chunk sizes, boundary selection, and overlap.", "raw": " - Experiment with chunk sizes, boundary selection, and overlap.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " - Consider creating custom contextualizer prompts for your specific domain.", "raw": " - Consider creating custom contextualizer prompts for your specific domain.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " - Test different numbers of retrieved chunks (5, 10, 20) to find the optimal balance.", "raw": " - Test different numbers of retrieved chunks (5, 10, 20) to find the optimal balance.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "7. Leverage prompt caching:", "raw": "7. 
Leverage prompt caching:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " - Use Claude's prompt caching feature to reduce costs when generating contextualized chunks.", "raw": " - Use Claude's prompt caching feature to reduce costs when generating contextualized chunks.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " - Cache the reference document once and reference it for each chunk, rather than passing it repeatedly.", "raw": " - Cache the reference document once and reference it for each chunk, rather than passing it repeatedly.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "8. Evaluate and iterate", "raw": "8. Evaluate and iterate", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Although this might sound like another way to make money on LLM API calls... Good folks at @AnthropicAI just introduced Contextual Retrieval, and it's a significant yet logical step up from simple Retrieval-Augmented Generation (RAG)! Here are the steps to implement Contextual Retrieval based on Anthropic's approach: 1. Preprocess the knowledge base: - Break down documents into smaller chunks (typically a few hundred tokens each). - Generate contextual information for each chunk using Claude 3 Haiku with a specific prompt. - Prepend the generated context (usually 50-100 tokens) to each chunk. 2. Create embeddings and a BM25 index: - Use an embedding model (Gemini or Voyage recommended) to convert contextualized chunks into vector embeddings. - Create a BM25 index using the contextualized chunks. 3. Set up the retrieval process: - Implement a system to search both the vector embeddings and the BM25 index. - Use rank fusion techniques to combine and deduplicate results from both searches. 4. Implement reranking (optional but recommended): - Retrieve the top 150 potentially relevant chunks initially. - Use a reranking model (e.g., Cohere reranker) to score these chunks based on relevance to the query. - Select the top 20 chunks after reranking. 5. Integrate with the generative model: - Add the top 20 chunks (or top K, based on your specific needs) to the prompt sent to the generative model. 6. Optimize for your use case: - Experiment with chunk sizes, boundary selection, and overlap. - Consider creating custom contextualizer prompts for your specific domain. - Test different numbers of retrieved chunks (5, 10, 20) to find the optimal balance. 7. Leverage prompt caching: - Use Claude's prompt caching feature to reduce costs when generating contextualized chunks. - Cache the reference document once and reference it for each chunk, rather than passing it repeatedly. 8. Evaluate and iterate
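As a rough illustration of steps 2-3, here is a minimal hybrid-retrieval sketch over already-contextualized chunks. It is not Anthropic's implementation: the `embed` function is a stand-in for any embedding model (e.g. Voyage or Gemini), `rank_bm25` is just one convenient BM25 library, the sample chunks are made up, and the reciprocal-rank-fusion constant (60) is a common default rather than a prescribed value.

```python
# Hybrid retrieval sketch: dense (vector) + sparse (BM25) search, fused with RRF.
import numpy as np
from rank_bm25 import BM25Okapi  # pip install rank-bm25

def embed(texts: list[str]) -> np.ndarray:
    # Placeholder embedding function: replace with a real embedding model call.
    rng = np.random.default_rng(0)
    return rng.normal(size=(len(texts), 384))

contextualized_chunks = [
    "This chunk is from ACME's Q2 2023 filing. Revenue grew 3% over the quarter.",
    "This chunk is from ACME's Q2 2023 filing. Gross margin held at 42%.",
]

chunk_vectors = embed(contextualized_chunks)
bm25 = BM25Okapi([c.lower().split() for c in contextualized_chunks])

def retrieve(query: str, k: int = 20) -> list[int]:
    # Dense search: rank chunks by cosine similarity to the query embedding.
    q = embed([query])[0]
    sims = chunk_vectors @ q / (np.linalg.norm(chunk_vectors, axis=1) * np.linalg.norm(q))
    dense_rank = np.argsort(-sims)

    # Sparse search: rank chunks by BM25 score against the tokenized query.
    sparse_rank = np.argsort(-bm25.get_scores(query.lower().split()))

    # Reciprocal rank fusion: combine and deduplicate the two rankings.
    fused: dict[int, float] = {}
    for ranking in (dense_rank, sparse_rank):
        for rank, idx in enumerate(ranking):
            fused[int(idx)] = fused.get(int(idx), 0.0) + 1.0 / (60 + rank)
    return sorted(fused, key=fused.get, reverse=True)[:k]

print(retrieve("How much did ACME's revenue grow?"))
```

Reranking (step 4) would then rescore the fused top-150 with a dedicated reranker before keeping the top 20 for the prompt.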
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/662bf5bfe93bb73804ef9344/WXYLnjjJ4SROkoveIi7If.png", "fullname": "Kuldeep Singh Sidhu", "name": "singhsidhukuldeep", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 219, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/662bf5bfe93bb73804ef9344/1Qs4WTZ9OUmLVZIqftHsz.jpeg" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "KingNish", "clem", "victor", "louisbrulenaudet" ], "count": 5 }, { "reaction": "๐Ÿ”ฅ", "users": [ "badamczewski01", "mathiasn1" ], "count": 2 } ]
2024-09-23T17:59:28.000Z
2024-09-23T17:59:28.693Z
[]
/posts/singhsidhukuldeep/845850475427869
1,515
0
981676883481405
[ { "type": "text", "value": "RWKV-7 \"Goose\" preview rc2 => Peak RNN architecture?๐Ÿ˜ƒWill try to squeeze more performance for the final release. Preview code & model: ", "raw": "RWKV-7 \"Goose\" preview rc2 => Peak RNN architecture?๐Ÿ˜ƒWill try to squeeze more performance for the final release. Preview code & model: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/BlinkDL/RWKV-LM/tree/main/RWKV-v7", "href": "https://github.com/BlinkDL/RWKV-LM/tree/main/RWKV-v7", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
RWKV-7 "Goose" preview rc2 => Peak RNN architecture?๐Ÿ˜ƒWill try to squeeze more performance for the final release. Preview code & model: https://github.com/BlinkDL/RWKV-LM/tree/main/RWKV-v7
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1655953609090-noauth.jpeg", "fullname": "BlinkDL", "name": "BlinkDL", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 613, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/62b3d8d651b07307bd12b7f0/z0V3LuJWYS70AYRylCuDG.png" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "YaTharThShaRma999", "Kotokin", "wannaphong", "ZhangRC", "DataSoul", "victor", "shoumenchougou", "MoonRide", "ArthurZ" ], "count": 10 }, { "reaction": "๐Ÿš€", "users": [ "wannaphong", "cnmoro", "ZhangRC", "alielfilali01" ], "count": 4 }, { "reaction": "๐Ÿ‘", "users": [ "dingo-actual", "shoumenchougou", "irodkin" ], "count": 3 }, { "reaction": "โค๏ธ", "users": [ "ZhangRC", "alielfilali01" ], "count": 2 }, { "reaction": "๐Ÿ”ฅ", "users": [ "asigalov61" ], "count": 1 } ]
2024-09-23T15:51:44.000Z
2024-10-01T18:22:46.859Z
[ { "avatarUrl": "/avatars/82a167e5b2f6a646e856685f22deb919.svg", "fullname": "abbaa", "name": "notlober", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5f57ea2d3f32f12a3c0692e6/b-9GG2p--smCameUPeCBN.jpeg", "fullname": "Alex", "name": "asigalov61", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 65, "isFollowing": false } ]
/posts/BlinkDL/981676883481405
4,617
2
833495365282114
[ { "type": "text", "value": "ColPali is revolutionizing multimodal retrieval, but could it be even more effective with domain-specific fine-tuning?", "raw": "ColPali is revolutionizing multimodal retrieval, but could it be even more effective with domain-specific fine-tuning?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Check out my latest blog post, where I guide you through creating a ColPali fine-tuning dataset using ", "raw": "Check out my latest blog post, where I guide you through creating a ColPali fine-tuning dataset using ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/Qwen/Qwen2-VL-7B-Instruct", "href": null, "resource": { "type": "model", "id": "Qwen/Qwen2-VL-7B-Instruct", "discussionNum": null }, "url": "https://huggingface.co/Qwen/Qwen2-VL-7B-Instruct", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " to generate queries for a collection of UFO documents sourced from the Internet Archive. ", "raw": " to generate queries for a collection of UFO documents sourced from the Internet Archive. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The post covers:", "raw": "The post covers:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Introduction to data for ColPali models", "raw": "- Introduction to data for ColPali models", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Using Qwen2-VL for retrieval query generation", "raw": "- Using Qwen2-VL for retrieval query generation", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Tips for better query generation", "raw": "- Tips for better query generation", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, 
"url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Check out the post here:", "raw": "Check out the post here:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://danielvanstrien.xyz/posts/post-with-code/colpali/2024-09-23-generate_colpali_dataset.html", "href": "https://danielvanstrien.xyz/posts/post-with-code/colpali/2024-09-23-generate_colpali_dataset.html", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The resulting Hugging Face dataset: ", "raw": "The resulting Hugging Face dataset: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/davanstrien/ufo-ColPali", "href": null, "resource": { "type": "dataset", "id": "davanstrien/ufo-ColPali", "discussionNum": null }, "url": "https://huggingface.co/datasets/davanstrien/ufo-ColPali", "code": null, "user": null, "label": null, "lang": null } ]
ColPali is revolutionizing multimodal retrieval, but could it be even more effective with domain-specific fine-tuning? Check out my latest blog post, where I guide you through creating a ColPali fine-tuning dataset using https://huggingface.co/Qwen/Qwen2-VL-7B-Instruct to generate queries for a collection of UFO documents sourced from the Internet Archive. The post covers: - Introduction to data for ColPali models - Using Qwen2-VL for retrieval query generation - Tips for better query generation Check out the post here: https://danielvanstrien.xyz/posts/post-with-code/colpali/2024-09-23-generate_colpali_dataset.html The resulting Hugging Face dataset: https://huggingface.co/datasets/davanstrien/ufo-ColPali
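For a flavour of the query-generation step, here is a hedged sketch following the Qwen2-VL model card; the prompt text and the `page.png` path are placeholders, and the blog post above uses its own, more careful prompting.

```python
# Sketch: ask Qwen2-VL-7B-Instruct for a plausible search query for one document page.
from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
from qwen_vl_utils import process_vision_info  # pip install qwen-vl-utils

model = Qwen2VLForConditionalGeneration.from_pretrained(
    "Qwen/Qwen2-VL-7B-Instruct", torch_dtype="auto", device_map="auto"
)
processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-7B-Instruct")

messages = [{
    "role": "user",
    "content": [
        {"type": "image", "image": "page.png"},  # placeholder path to a page image
        {"type": "text", "text": "Write a short search query a user might type to find this document page."},
    ],
}]

text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
image_inputs, video_inputs = process_vision_info(messages)
inputs = processor(
    text=[text], images=image_inputs, videos=video_inputs,
    padding=True, return_tensors="pt",
).to(model.device)

out = model.generate(**inputs, max_new_tokens=64)
# Decode only the newly generated tokens (strip the prompt part).
query = processor.batch_decode(out[:, inputs.input_ids.shape[1]:], skip_special_tokens=True)[0]
print(query)
```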
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1627505688463-60107b385ac3e86b3ea4fc34.jpeg", "fullname": "Daniel van Strien", "name": "davanstrien", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 410, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "lbourdois", "Chunte", "TheDrunkenSnail", "den0620", "louisbrulenaudet", "merve", "AtAndDev" ], "count": 8 }, { "reaction": "๐Ÿš€", "users": [ "lbourdois", "Chunte", "merve", "AtAndDev", "adorkin" ], "count": 5 }, { "reaction": "โค๏ธ", "users": [ "aiisthebest", "AtAndDev", "baslak", "VoVAllen" ], "count": 4 }, { "reaction": "๐Ÿง ", "users": [ "Csplk", "merve", "AtAndDev" ], "count": 3 } ]
2024-09-23T14:28:27.000Z
2024-09-24T05:27:50.732Z
[ { "avatarUrl": "/avatars/b2725bb163fa15d6c5856121780d52eb.svg", "fullname": "Ci Splunk", "name": "Csplk", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 43, "isFollowing": false } ]
/posts/davanstrien/833495365282114
3,143
1
661418655932123
[ { "type": "text", "value": "Looking for a logo idea ๐Ÿ‘€ ?", "raw": "Looking for a logo idea ๐Ÿ‘€ ?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I made a new cool space ", "raw": "I made a new cool space ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/enzostvs/Logo.Ai", "href": null, "resource": { "type": "space", "id": "enzostvs/Logo.Ai", "discussionNum": null }, "url": "https://huggingface.co/spaces/enzostvs/Logo.Ai", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " to help you design a great logo in seconds!", "raw": " to help you design a great logo in seconds!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Here are some examples of what you can do, feel free to share yours too! ๐Ÿš€", "raw": "Here are some examples of what you can do, feel free to share yours too! ๐Ÿš€", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Looking for a logo idea ๐Ÿ‘€ ? I made a new cool space https://huggingface.co/spaces/enzostvs/Logo.Ai to help you design a great logo in seconds! Here are some examples of what you can do, feel free to share yours too! ๐Ÿš€
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64f73f25098581ab15e2f5ad/P_-5s8W6hA7wrs5ggF2Xu.jpeg", "fullname": "enzo", "name": "enzostvs", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 213, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64f73f25098581ab15e2f5ad/0x-OgcFmOz28MOdKmyt-M.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64f73f25098581ab15e2f5ad/SRh3pD-K3gPLKso1qFprP.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64f73f25098581ab15e2f5ad/vT_IhfA9caUcsU9V11DTV.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64f73f25098581ab15e2f5ad/qT7VtnNjdgWnex0iRZNF9.png" } ]
[]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "victor", "julien-c", "KingNish", "John6666", "bramvera", "Chunte", "den0620", "louisbrulenaudet", "sghimire", "AtAndDev", "Eyel", "OmbelineM" ], "count": 12 } ]
2024-09-23T14:26:25.000Z
2024-09-23T14:26:25.430Z
[]
/posts/enzostvs/661418655932123
3,418
0
350368866710018
[ { "type": "text", "value": "IBM & NASA just released open-source AI model for weather & climate on Hugging Face. ", "raw": "IBM & NASA just released open-source AI model for weather & climate on Hugging Face. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Prithvi WxC offers insights beyond forecasting, tackling challenges from local weather to global climate. Potential apps: targeted forecasts, severe weather detection & more. ", "raw": "Prithvi WxC offers insights beyond forecasting, tackling challenges from local weather to global climate. Potential apps: targeted forecasts, severe weather detection & more. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/Prithvi-WxC", "href": "https://huggingface.co/Prithvi-WxC", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This is impressive. Check out this comparison of the Ida hurricane between ground truth and the AI model's prediction.", "raw": "This is impressive. Check out this comparison of the Ida hurricane between ground truth and the AI model's prediction.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
IBM & NASA just released open-source AI model for weather & climate on Hugging Face. Prithvi WxC offers insights beyond forecasting, tackling challenges from local weather to global climate. Potential apps: targeted forecasts, severe weather detection & more. https://huggingface.co/Prithvi-WxC This is impressive. Check out this comparison of the Ida hurricane between ground truth and the AI model's prediction.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/647f36a8454af0237bd49574/jshkqBUTY-GZL8As8y6Aq.jpeg", "fullname": "Florent Daudens", "name": "fdaudens", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 384, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/647f36a8454af0237bd49574/TlAWyTQcyKrQ5N9OG6696.gif" } ]
[]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "KvrParaskevi", "John6666", "Colince", "den0620", "pagezyhf", "crystal99", "mrm8488", "firqaaa", "tarob0ba", "louisbrulenaudet", "acouloumy-novaa", "jmackie" ], "count": 12 }, { "reaction": "โค๏ธ", "users": [ "Svngoku", "NguyenDraspThanhDat", "smach", "DrGsWorld" ], "count": 4 } ]
2024-09-23T13:25:42.000Z
2024-09-23T13:25:42.361Z
[]
/posts/fdaudens/350368866710018
2,034
0
975141101619472
[ { "type": "text", "value": "The new NIM Serverless API by HF and Nvidia is a great option if you want a reliable API for open-weight LLMs like Llama-3.1-405B that are too expensive to run on your own hardware.", "raw": "The new NIM Serverless API by HF and Nvidia is a great option if you want a reliable API for open-weight LLMs like Llama-3.1-405B that are too expensive to run on your own hardware.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- It's pay-as-you-go, so it doesn't have rate limits like the standard HF Serverless API and you don't need to commit to hardware like for a dedicated endpoint.", "raw": "- It's pay-as-you-go, so it doesn't have rate limits like the standard HF Serverless API and you don't need to commit to hardware like for a dedicated endpoint.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- It works out-of-the box with the new v0.25 release of our huggingface_hub.InferenceClient", "raw": "- It works out-of-the box with the new v0.25 release of our huggingface_hub.InferenceClient", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- It's specifically tailored to a small collection of popular open-weight models. For a broader selection of open models, we recommend using the standard HF Serverless API.", "raw": "- It's specifically tailored to a small collection of popular open-weight models. 
For a broader selection of open models, we recommend using the standard HF Serverless API.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Note that you need a token from an Enterprise Hub organization to use it.", "raw": "- Note that you need a token from an Enterprise Hub organization to use it.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Details in this blog post: ", "raw": "Details in this blog post: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/blog/inference-dgx-cloud", "href": "https://huggingface.co/blog/inference-dgx-cloud", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Compatible models in this HF collection: ", "raw": "Compatible models in this HF collection: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/nvidia/nim-serverless-inference-api-66a3c6fcdcb5bbc6e975b508", "href": null, "resource": { "type": "collection", "id": "nvidia/nim-serverless-inference-api-66a3c6fcdcb5bbc6e975b508", "discussionNum": null }, "url": "https://huggingface.co/collections/nvidia/nim-serverless-inference-api-66a3c6fcdcb5bbc6e975b508", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Release notes with many more features of huggingface_hub==0.25.0: ", "raw": "Release notes with many more features of huggingface_hub==0.25.0: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/huggingface/huggingface_hub/releases/tag/v0.25.0", "href": "https://github.com/huggingface/huggingface_hub/releases/tag/v0.25.0", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Copy-pasteable code in the first comment: ", "raw": "Copy-pasteable code in the first comment: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
The new NIM Serverless API by HF and Nvidia is a great option if you want a reliable API for open-weight LLMs like Llama-3.1-405B that are too expensive to run on your own hardware. - It's pay-as-you-go, so it doesn't have rate limits like the standard HF Serverless API, and you don't need to commit to hardware like for a dedicated endpoint. - It works out of the box with the new v0.25 release of our huggingface_hub.InferenceClient. - It's specifically tailored to a small collection of popular open-weight models. For a broader selection of open models, we recommend using the standard HF Serverless API. - Note that you need a token from an Enterprise Hub organization to use it. Details in this blog post: https://huggingface.co/blog/inference-dgx-cloud Compatible models in this HF collection: https://huggingface.co/collections/nvidia/nim-serverless-inference-api-66a3c6fcdcb5bbc6e975b508 Release notes with many more features of huggingface_hub==0.25.0: https://github.com/huggingface/huggingface_hub/releases/tag/v0.25.0 Copy-pasteable code in the first comment:
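The call pattern looks roughly like this, assuming `huggingface_hub>=0.25` and a fine-grained token from an Enterprise Hub organization; the model id is only an example (it must be one of the models in the NIM collection above), and the blog post linked above has the exact, copy-pasteable endpoint setup.

```python
# Minimal sketch of a chat completion via huggingface_hub's InferenceClient.
from huggingface_hub import InferenceClient

client = InferenceClient(token="hf_xxx")  # placeholder: Enterprise Hub org token

out = client.chat_completion(
    model="meta-llama/Meta-Llama-3.1-405B-Instruct-FP8",  # illustrative model id
    messages=[{"role": "user", "content": "Write a haiku about serverless GPUs."}],
    max_tokens=64,
)
print(out.choices[0].message.content)
```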
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1613511937628-5fb15d1e84389b139cf3b508.jpeg", "fullname": "Moritz Laurer", "name": "MoritzLaurer", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 236, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/5fb15d1e84389b139cf3b508/lC7pUsikKKVXY8FTEd32h.png" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "davanstrien", "nbroad" ], "count": 3 } ]
2024-09-23T10:02:22.000Z
2024-09-23T11:31:34.170Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1613511937628-5fb15d1e84389b139cf3b508.jpeg", "fullname": "Moritz Laurer", "name": "MoritzLaurer", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 236, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1627505688463-60107b385ac3e86b3ea4fc34.jpeg", "fullname": "Daniel van Strien", "name": "davanstrien", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 410, "isFollowing": false } ]
/posts/MoritzLaurer/975141101619472
2,062
2
874685805652553
[ { "type": "text", "value": "๐ŸŽ‰ Exciting News: Argilla 2.2.0 is Here! ๐Ÿš€", "raw": "๐ŸŽ‰ Exciting News: Argilla 2.2.0 is Here! ๐Ÿš€", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "We're thrilled to announce the release of Argilla 2.2.0, packed with powerful new features to enhance your data annotation and LLM workflow:", "raw": "We're thrilled to announce the release of Argilla 2.2.0, packed with powerful new features to enhance your data annotation and LLM workflow:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ—จ๏ธ ChatField: Work with text conversations natively in Argilla. Perfect for building datasets for conversational LLMs!", "raw": "๐Ÿ—จ๏ธ ChatField: Work with text conversations natively in Argilla. Perfect for building datasets for conversational LLMs!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โš™๏ธ Adjustable Task Distribution: Modify settings on the fly and automatically recalculate completed and pending records.", "raw": "โš™๏ธ Adjustable Task Distribution: Modify settings on the fly and automatically recalculate completed and pending records.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ“Š Progress Tracking: Monitor annotation progress directly from the SDK, including user-specific metrics.", "raw": "๐Ÿ“Š Progress Tracking: Monitor annotation progress directly from the SDK, including user-specific metrics.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿง  Automatic Settings Inference: Importing datasets from Hugging Face Hub just got easier with automatic settings detection.", "raw": "๐Ÿง  Automatic Settings Inference: Importing datasets from Hugging Face Hub just got easier with automatic settings detection.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ“‹ Task Templates: Jump-start your projects with pre-built templates for common 
dataset types.", "raw": "๐Ÿ“‹ Task Templates: Jump-start your projects with pre-built templates for common dataset types.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ”ง Background Jobs Support: Improved performance for long-running tasks (requires Redis).", "raw": "๐Ÿ”ง Background Jobs Support: Improved performance for long-running tasks (requires Redis).", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Upgrade now and supercharge your data workflows! ", "raw": "Upgrade now and supercharge your data workflows! ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Check out our full changelog for more details: ", "raw": "Check out our full changelog for more details: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/argilla-io/argilla/compare/v2.1.0...v2.2.0", "href": "https://github.com/argilla-io/argilla/compare/v2.1.0...v2.2.0", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
๐ŸŽ‰ Exciting News: Argilla 2.2.0 is Here! ๐Ÿš€ We're thrilled to announce the release of Argilla 2.2.0, packed with powerful new features to enhance your data annotation and LLM workflow: ๐Ÿ—จ๏ธ ChatField: Work with text conversations natively in Argilla. Perfect for building datasets for conversational LLMs! โš™๏ธ Adjustable Task Distribution: Modify settings on the fly and automatically recalculate completed and pending records. ๐Ÿ“Š Progress Tracking: Monitor annotation progress directly from the SDK, including user-specific metrics. ๐Ÿง  Automatic Settings Inference: Importing datasets from Hugging Face Hub just got easier with automatic settings detection. ๐Ÿ“‹ Task Templates: Jump-start your projects with pre-built templates for common dataset types. ๐Ÿ”ง Background Jobs Support: Improved performance for long-running tasks (requires Redis). Upgrade now and supercharge your data workflows! Check out our full changelog for more details: https://github.com/argilla-io/argilla/compare/v2.1.0...v2.2.0
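As a small, hedged example of the new ChatField (the dataset name, URL, and API key below are placeholders; check the Argilla 2.2 docs for the exact signatures):

```python
import argilla as rg

# Connect to a running Argilla 2.2 instance (URL and key are placeholders).
client = rg.Argilla(api_url="http://localhost:6900", api_key="argilla.apikey")

# A dataset whose records hold a conversation plus one annotation question.
settings = rg.Settings(
    fields=[rg.ChatField(name="conversation")],
    questions=[rg.TextQuestion(name="improved_response")],
)
dataset = rg.Dataset(name="chat-review-demo", settings=settings, client=client)
dataset.create()

# Log one conversational record; the chat field takes a list of role/content turns.
dataset.records.log([
    {
        "conversation": [
            {"role": "user", "content": "What does ChatField do?"},
            {"role": "assistant", "content": "It renders conversations natively in the UI."},
        ]
    }
])
```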
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1677141720071-634ff41ff32062e9eb7b06a3.jpeg", "fullname": "David Berenstein", "name": "davidberenstein1957", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 167, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿš€", "users": [ "bergr7f", "davidberenstein1957", "John6666", "djuna", "gabrielmbmb", "mmhamdy", "aiisthebest", "Csplk" ], "count": 8 }, { "reaction": "โค๏ธ", "users": [ "davidberenstein1957", "mmhamdy", "seanmiranda", "louisbrulenaudet", "aiisthebest" ], "count": 5 }, { "reaction": "๐Ÿ”ฅ", "users": [ "davidberenstein1957", "mmhamdy", "aiisthebest" ], "count": 3 }, { "reaction": "๐Ÿ‘", "users": [ "aiisthebest" ], "count": 1 } ]
2024-09-23T09:55:49.000Z
2024-09-23T09:55:49.761Z
[]
/posts/davidberenstein1957/874685805652553
2,146
0
234687257934234
[ { "type": "text", "value": "No-Code LLM Fine-Tuning and Debugging in Real Time: Case Study", "raw": "No-Code LLM Fine-Tuning and Debugging in Real Time: Case Study", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Full doc at ", "raw": "Full doc at ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://mltblog.com/47DisG5", "href": "https://mltblog.com/47DisG5", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Have you tried the xLLM web API? It allows you to fine-tune and debug an agentic multi-LLM in real time. The input data is part of the anonymized corporate corpus of a Fortune 100 company, dealing with AI policies, documentation, integration, best practices, references, onboarding, and so on. It features one sub-LLM. The full corpus is broken down into 15 sub-LLMs.", "raw": "Have you tried the xLLM web API? It allows you to fine-tune and debug an agentic multi-LLM in real time. The input data is part of the anonymized corporate corpus of a Fortune 100 company, dealing with AI policies, documentation, integration, best practices, references, onboarding, and so on. It features one sub-LLM. The full corpus is broken down into 15 sub-LLMs.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "One of the goals is to return concise but exhaustive results, using acronyms (a specific table for each sub-LLM) to map multi-tokens found in prompts but not in the corpus, with multi-tokens in the corpus. Exhaustivity is the most overlooked metric when evaluating LLMs designed for search / retrieval. Using xLLM in combination with another LLMs is one of the best approaches, and both can be used to evaluate each other. Yet, thanks to fast in-memory processing, no weight, and no training, the xLLM web API is one of its kind, with capabilities not found in any competing product, free or not.", "raw": "One of the goals is to return concise but exhaustive results, using acronyms (a specific table for each sub-LLM) to map multi-tokens found in prompts but not in the corpus, with multi-tokens in the corpus. Exhaustivity is the most overlooked metric when evaluating LLMs designed for search / retrieval. Using xLLM in combination with another LLMs is one of the best approaches, and both can be used to evaluate each other. 
Yet, thanks to fast in-memory processing, no weight, and no training, the xLLM web API is one of its kind, with capabilities not found in any competing product, free or not.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Read more at ", "raw": "Read more at ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://mltblog.com/47DisG5", "href": "https://mltblog.com/47DisG5", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
No-Code LLM Fine-Tuning and Debugging in Real Time: Case Study Full doc at https://mltblog.com/47DisG5 Have you tried the xLLM web API? It allows you to fine-tune and debug an agentic multi-LLM in real time. The input data is part of the anonymized corporate corpus of a Fortune 100 company, dealing with AI policies, documentation, integration, best practices, references, onboarding, and so on. It features one sub-LLM; the full corpus is broken down into 15 sub-LLMs. One of the goals is to return concise but exhaustive results, using acronyms (a specific table for each sub-LLM) to map multi-tokens found in prompts but not in the corpus to multi-tokens in the corpus. Exhaustivity is the most overlooked metric when evaluating LLMs designed for search / retrieval. Using xLLM in combination with other LLMs is one of the best approaches, and the two can be used to evaluate each other. Yet, thanks to fast in-memory processing, no weights, and no training, the xLLM web API is one of a kind, with capabilities not found in any competing product, free or not. Read more at https://mltblog.com/47DisG5
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/669c89e98f2dbc203f9e74ab/higvnXEHeo_Ig2bgTpn47.png", "fullname": "Vincent Granville", "name": "vincentg64", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 17, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/669c89e98f2dbc203f9e74ab/xPfeV_GpzE7dU1UfF6LIS.png" } ]
[]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "vrushankkk", "osanseviero", "kentjzhu" ], "count": 3 }, { "reaction": "๐Ÿ‘€", "users": [ "John6666", "b1ka" ], "count": 2 } ]
2024-09-23T02:05:08.000Z
2024-09-23T09:45:38.767Z
[]
/posts/vincentg64/234687257934234
1,816
2
975501981171332
[ { "type": "text", "value": "Detailed Comparison of JoyCaption Alpha One vs JoyCaption Pre-Alpha โ€” 10 Different Style Amazing Images โ€” I think JoyCaption Alpha One is the very best image captioning model at the moment for model training โ€” Works very fast and requires as low as 8.5 GB VRAM", "raw": "Detailed Comparison of JoyCaption Alpha One vs JoyCaption Pre-Alpha โ€” 10 Different Style Amazing Images โ€” I think JoyCaption Alpha One is the very best image captioning model at the moment for model training โ€” Works very fast and requires as low as 8.5 GB VRAM", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Where To Download And Install", "raw": "Where To Download And Install", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "You can download our APP from here : ", "raw": "You can download our APP from here : ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.patreon.com/posts/110613301", "href": "https://www.patreon.com/posts/110613301", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1-Click to install on Windows, RunPod and Massed Compute", "raw": "1-Click to install on Windows, RunPod and Massed Compute", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Official APP is here where you can try : ", "raw": "Official APP is here where you can try : ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/fancyfeast/joy-caption-alpha-one", "href": null, "resource": { "type": "space", "id": "fancyfeast/joy-caption-alpha-one", "discussionNum": null }, "url": "https://huggingface.co/spaces/fancyfeast/joy-caption-alpha-one", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, 
{ "type": "text", "value": "Have The Following Features", "raw": "Have The Following Features", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Auto downloads meta-llama/Meta-Llama-3.1โ€“8B into your Hugging Face cache folder and other necessary models into the installation folder", "raw": "Auto downloads meta-llama/Meta-Llama-3.1โ€“8B into your Hugging Face cache folder and other necessary models into the installation folder", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Use 4-bit quantization โ€” Uses 8.5 GB VRAM Total", "raw": "Use 4-bit quantization โ€” Uses 8.5 GB VRAM Total", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Overwrite existing caption file", "raw": "Overwrite existing caption file", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Append new caption to existing caption", "raw": "Append new caption to existing caption", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Remove newlines from generated captions", "raw": "Remove newlines from generated captions", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Cut off at last complete sentence", "raw": "Cut off at last complete sentence", "href": null, "resource": null, "url": null, "code": null, "user": null, 
"label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Discard repeating sentences", "raw": "Discard repeating sentences", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Donโ€™t save processed image", "raw": "Donโ€™t save processed image", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Caption Prefix", "raw": "Caption Prefix", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Caption Suffix", "raw": "Caption Suffix", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Custom System Prompt (Optional)", "raw": "Custom System Prompt (Optional)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Input Folder for Batch Processing", "raw": "Input Folder for Batch Processing", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Output Folder for Batch Processing (Optional)", "raw": "Output Folder for Batch Processing (Optional)", "href": null, "resource": null, "url": null, "code": null, 
"user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Fully supported Multi GPU captioning โ€” GPU IDs (comma-separated, e.g., 0,1,2)", "raw": "Fully supported Multi GPU captioning โ€” GPU IDs (comma-separated, e.g., 0,1,2)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Batch Size โ€” Batch captioning", "raw": "Batch Size โ€” Batch captioning", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Detailed Comparison of JoyCaption Alpha One vs JoyCaption Pre-Alpha — 10 Different Style Amazing Images — I think JoyCaption Alpha One is the very best image captioning model at the moment for model training — Works very fast and requires as little as 8.5 GB of VRAM Where To Download And Install You can download our APP from here : https://www.patreon.com/posts/110613301 1-Click to install on Windows, RunPod and Massed Compute Official APP is here where you can try it : https://huggingface.co/spaces/fancyfeast/joy-caption-alpha-one Have The Following Features Auto downloads meta-llama/Meta-Llama-3.1-8B into your Hugging Face cache folder and other necessary models into the installation folder Use 4-bit quantization — Uses 8.5 GB VRAM Total Overwrite existing caption file Append new caption to existing caption Remove newlines from generated captions Cut off at last complete sentence Discard repeating sentences Don't save processed image Caption Prefix Caption Suffix Custom System Prompt (Optional) Input Folder for Batch Processing Output Folder for Batch Processing (Optional) Fully supported Multi GPU captioning — GPU IDs (comma-separated, e.g., 0,1,2) Batch Size — Batch captioning
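For readers curious what the 4-bit quantization / 8.5 GB VRAM figure typically involves, here is a minimal sketch of loading the Llama 3.1 8B text backbone in 4-bit with Hugging Face transformers and bitsandbytes. This is a generic illustration under the assumption of NF4 quantization, not the app's actual code, and it assumes you have accepted the gated model license:

```python
# Generic 4-bit loading sketch (not JoyCaption's actual code). NF4 quantization via
# bitsandbytes is what typically brings an 8B Llama backbone under ~9 GB of VRAM.
# Requires: transformers, accelerate, bitsandbytes, and access to the gated model.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_id = "meta-llama/Meta-Llama-3.1-8B"  # gated model; license acceptance required

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=bnb_config,
    device_map="auto",  # place layers on the available GPU(s)
)

print(f"Loaded {model_id} with 4-bit weights")
```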
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1672531901326-6345bd89fe134dfd7a0dba40.png", "fullname": "Furkan Gรถzรผkara", "name": "MonsterMMORPG", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 376, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/WCsOvPMfO-1nT0o60EmcR.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/nu9ZQYOYzWjWSlD7N75A4.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/dn4RHCu0pnroxbEkl0hrh.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/k_ykhklijPfukiclze4gC.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/uSxs2dxIFxLQFs9X5QGUi.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/RCqVCJkYm5ohqhXpkXqoe.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/vEDYCwhvzUvEJjpWYJw_e.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/7dCUTuF9cXWZ33qgj84cu.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/NGrRknRB2emTHqW3WSo9o.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/xaNhfVDuWoy6pUDSJz7Ct.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/PnPhRlOq71S1P4bYRTB8o.png" } ]
[]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "MonsterMMORPG", "MaziyarPanahi", "KingNish", "Andyrasika" ], "count": 4 }, { "reaction": "๐Ÿš€", "users": [ "MonsterMMORPG", "John6666", "louisbrulenaudet", "MaziyarPanahi" ], "count": 4 }, { "reaction": "โค๏ธ", "users": [ "MonsterMMORPG", "aiisthebest", "lluisagusti" ], "count": 3 }, { "reaction": "๐Ÿ‘", "users": [ "MonsterMMORPG", "leehome", "runebloodstone" ], "count": 3 }, { "reaction": "๐Ÿ‘€", "users": [ "MonsterMMORPG" ], "count": 1 }, { "reaction": "๐Ÿค—", "users": [ "MonsterMMORPG" ], "count": 1 }, { "reaction": "๐Ÿ˜Ž", "users": [ "MonsterMMORPG" ], "count": 1 }, { "reaction": "โž•", "users": [ "MonsterMMORPG" ], "count": 1 }, { "reaction": "๐Ÿง ", "users": [ "MonsterMMORPG" ], "count": 1 }, { "reaction": "๐Ÿค", "users": [ "MonsterMMORPG" ], "count": 1 }, { "reaction": "๐Ÿคฏ", "users": [ "MonsterMMORPG" ], "count": 1 } ]
2024-09-22T23:20:09.000Z
2024-09-22T23:20:09.643Z
[]
/posts/MonsterMMORPG/975501981171332
2,923
0
793053396198664
[ { "type": "text", "value": "A 'small' MobileNet-V4 update, I just pushed weights for the smallest model I've trained in the series, a 0.5 width multiplier version of the MobileNet-V4 Conv Small.", "raw": "A 'small' MobileNet-V4 update, I just pushed weights for the smallest model I've trained in the series, a 0.5 width multiplier version of the MobileNet-V4 Conv Small.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Now you may look at this and say hey, why is this impressive? 64.8% top-1 and 2.2M params? MobileNetV3-Small 0.75, and MobileNet-V2 0.5 are both fewer params (at ~2M) and over 65% top-1, what gives? Well this is where MobileNet-V4 differs from the previous versions of the model family, it trades off (gives up) a little parameter efficiency for some computational efficiency.", "raw": "Now you may look at this and say hey, why is this impressive? 64.8% top-1 and 2.2M params? MobileNetV3-Small 0.75, and MobileNet-V2 0.5 are both fewer params (at ~2M) and over 65% top-1, what gives? Well this is where MobileNet-V4 differs from the previous versions of the model family, it trades off (gives up) a little parameter efficiency for some computational efficiency.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "So, let's look at the speed. On a 4090 w/ torchcompile", "raw": "So, let's look at the speed. 
On a 4090 w/ torchcompile", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "* 98K img/sec - ", "raw": "* 98K img/sec - ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/timm/mobilenetv4_conv_small_050.e3000_r224_in1k", "href": null, "resource": { "type": "model", "id": "timm/mobilenetv4_conv_small_050.e3000_r224_in1k", "discussionNum": null }, "url": "https://huggingface.co/timm/mobilenetv4_conv_small_050.e3000_r224_in1k", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "* 58K img/sec - ", "raw": "* 58K img/sec - ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/timm/mobilenetv3_small_075.lamb_in1k", "href": null, "resource": { "type": "model", "id": "timm/mobilenetv3_small_075.lamb_in1k", "discussionNum": null }, "url": "https://huggingface.co/timm/mobilenetv3_small_075.lamb_in1k", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "* 37K img/sec - ", "raw": "* 37K img/sec - ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/timm/mobilenetv2_050.lamb_in1k", "href": null, "resource": { "type": "model", "id": "timm/mobilenetv2_050.lamb_in1k", "discussionNum": null }, "url": "https://huggingface.co/timm/mobilenetv2_050.lamb_in1k", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "And there you go, if you have a need for speed, MNV4 is the better option.", "raw": "And there you go, if you have a need for speed, MNV4 is the better option.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
A 'small' MobileNet-V4 update: I just pushed weights for the smallest model I've trained in the series, a 0.5 width multiplier version of the MobileNet-V4 Conv Small. Now you may look at this and say hey, why is this impressive? 64.8% top-1 and 2.2M params? MobileNetV3-Small 0.75 and MobileNet-V2 0.5 both have fewer params (at ~2M) and over 65% top-1, what gives? Well, this is where MobileNet-V4 differs from the previous versions of the model family: it trades off (gives up) a little parameter efficiency for some computational efficiency. So, let's look at the speed. On a 4090 w/ torch.compile * 98K img/sec - https://huggingface.co/timm/mobilenetv4_conv_small_050.e3000_r224_in1k * 58K img/sec - https://huggingface.co/timm/mobilenetv3_small_075.lamb_in1k * 37K img/sec - https://huggingface.co/timm/mobilenetv2_050.lamb_in1k And there you go, if you have a need for speed, MNV4 is the better option.
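A rough sketch of how such a throughput comparison can be reproduced with timm and torch.compile follows; the batch size, dtype, and timing loop are assumptions, so absolute img/sec numbers will differ from those quoted above:

```python
# Rough throughput benchmark sketch (assumed batch size and dtype; numbers will vary).
import time
import timm
import torch

model = timm.create_model("mobilenetv4_conv_small_050.e3000_r224_in1k", pretrained=True)
model = model.eval().cuda()
model = torch.compile(model)

batch = torch.randn(256, 3, 224, 224, device="cuda")

with torch.inference_mode(), torch.autocast("cuda", dtype=torch.float16):
    for _ in range(10):          # warmup iterations (also trigger compilation)
        model(batch)
    torch.cuda.synchronize()
    start = time.perf_counter()
    iters = 50
    for _ in range(iters):       # timed iterations
        model(batch)
    torch.cuda.synchronize()
    elapsed = time.perf_counter() - start

print(f"{iters * batch.shape[0] / elapsed:,.0f} img/sec")
```

Swapping in the other two model names from the links above gives the corresponding comparison points on your own hardware.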
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1667002643224-604a5184dca2c7ac7508b849.jpeg", "fullname": "Ross Wightman", "name": "rwightman", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 221, "isFollowing": false }
[]
[]
[ { "reaction": "โค๏ธ", "users": [ "gxkok", "AI4Industry", "John6666", "osanseviero", "reach-vb", "Tonic", "byoussef", "irotem98" ], "count": 8 } ]
2024-09-22T23:17:50.000Z
2024-09-23T02:50:31.550Z
[]
/posts/rwightman/793053396198664
2,483
0
397615509713150
[ { "type": "mention", "value": null, "raw": "@victor", "href": null, "resource": null, "url": null, "code": null, "user": "victor", "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@not-lain", "href": null, "resource": null, "url": null, "code": null, "user": "not-lain", "label": null, "lang": null }, { "type": "text", "value": " There has been a sudden and unusual outbreak of spam postings on the HF Forum that seem to be aimed at relaying online videos and commenting on them. It is also spanning multiple languages for some reason. I've flagged it too, but I'm not sure if the staff will be able to keep up with the manual measures in the future.", "raw": " There has been a sudden and unusual outbreak of spam postings on the HF Forum that seem to be aimed at relaying online videos and commenting on them. It is also spanning multiple languages for some reason. I've flagged it too, but I'm not sure if the staff will be able to keep up with the manual measures in the future.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
@victor @not-lain There has been a sudden and unusual outbreak of spam postings on the HF Forum that seem to be aimed at relaying online videos and commenting on them. It is also spanning multiple languages for some reason. I've flagged it too, but I'm not sure if the staff will be able to keep up with the manual measures in the future.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6640bbd0220cfa8cbfdce080/wiAHUu5ewawyipNs0YFBR.png", "fullname": "John Smith", "name": "John6666", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 398, "isFollowing": false }
[]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6527e89a8808d80ccff88b7a/CuGNmF1Et8KMQ0mCd1NEJ.jpeg", "fullname": "Lain", "name": "not-lain", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 941 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5f17f0a0925b9863e28ad517/X7QKoiXbUtEZSG9jyvfk3.jpeg", "fullname": "Victor Mustar", "name": "victor", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 2607 } ]
[ { "reaction": "โค๏ธ", "users": [ "not-lain", "lunarflu", "no-mad" ], "count": 3 }, { "reaction": "๐Ÿ‘€", "users": [ "victor", "andito" ], "count": 2 } ]
2024-09-22T13:37:53.000Z
2024-11-23T17:40:58.949Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6527e89a8808d80ccff88b7a/CuGNmF1Et8KMQ0mCd1NEJ.jpeg", "fullname": "Lain", "name": "not-lain", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 941, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6593502ca2607099284523db/AmC0JuUV3_yk74ETYh_fI.png", "fullname": "william marshall", "name": "fuzzy-mittenz", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 16, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6640bbd0220cfa8cbfdce080/wiAHUu5ewawyipNs0YFBR.png", "fullname": "John Smith", "name": "John6666", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 398, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63171caf1cc81c5e95ed7b92/29I5Lr0vLRcQR7AfCZcYj.jpeg", "fullname": "Akim Mousterou", "name": "AkimfromParis", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 7, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/660bd4793f16e207645fb119/IHT-fPdrfdgti7bKo8UZo.jpeg", "fullname": "No-mad", "name": "no-mad", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 13, "isFollowing": false } ]
/posts/John6666/397615509713150
8,860
16
418630943924160
[ { "type": "text", "value": "It's not every day you see a research paper named \"Alice's Adventures in a Differentiable Wonderland,\" and when you open it, it's a 281-page book!", "raw": "It's not every day you see a research paper named \"Alice's Adventures in a Differentiable Wonderland,\" and when you open it, it's a 281-page book!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I haven't completed it yet, but this amazing work, written by Simone Scardapane, is a fascinating introduction to deep neural networks and differentiable programming. ", "raw": "I haven't completed it yet, but this amazing work, written by Simone Scardapane, is a fascinating introduction to deep neural networks and differentiable programming. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Some key technical highlights:", "raw": "Some key technical highlights:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โ€ข Covers core concepts like automatic differentiation, stochastic optimization, and activation functions in depth", "raw": "โ€ข Covers core concepts like automatic differentiation, stochastic optimization, and activation functions in depth", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โ€ข Explains modern architectures like convolutional networks, transformers, and graph neural networks", "raw": "โ€ข Explains modern architectures like convolutional networks, transformers, and graph neural networks", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โ€ข Provides mathematical foundations including linear algebra, gradients, and probability theory", "raw": "โ€ข Provides 
mathematical foundations including linear algebra, gradients, and probability theory", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โ€ข Discusses implementation details in PyTorch and JAX", "raw": "โ€ข Discusses implementation details in PyTorch and JAX", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โ€ข Explores advanced topics like Bayesian neural networks and neural scaling laws", "raw": "โ€ข Explores advanced topics like Bayesian neural networks and neural scaling laws", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The book takes a unique approach, framing neural networks as compositions of differentiable primitives rather than biological analogs. It provides both theoretical insights and practical coding examples.", "raw": "The book takes a unique approach, framing neural networks as compositions of differentiable primitives rather than biological analogs. 
It provides both theoretical insights and practical coding examples.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I especially enjoyed the sections on:", "raw": "I especially enjoyed the sections on:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โ€ข Vector-Jacobian products and reverse-mode autodiff", "raw": "โ€ข Vector-Jacobian products and reverse-mode autodiff", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โ€ข Stochastic gradient descent and mini-batch optimization ", "raw": "โ€ข Stochastic gradient descent and mini-batch optimization ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โ€ข ReLU, GELU, and other modern activation functions", "raw": "โ€ข ReLU, GELU, and other modern activation functions", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โ€ข Universal approximation capabilities of MLPs", "raw": "โ€ข Universal approximation capabilities of MLPs", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Whether you're new to deep learning or an experienced practitioner, this book offers valuable insights into the fundamentals and latest developments. Highly recommended for anyone working with neural networks!", "raw": "Whether you're new to deep learning or an experienced practitioner, this book offers valuable insights into the fundamentals and latest developments. Highly recommended for anyone working with neural networks!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
It's not every day you see a research paper named "Alice's Adventures in a Differentiable Wonderland," and when you open it, it's a 281-page book! I haven't completed it yet, but this amazing work, written by Simone Scardapane, is a fascinating introduction to deep neural networks and differentiable programming. Some key technical highlights: • Covers core concepts like automatic differentiation, stochastic optimization, and activation functions in depth • Explains modern architectures like convolutional networks, transformers, and graph neural networks • Provides mathematical foundations including linear algebra, gradients, and probability theory • Discusses implementation details in PyTorch and JAX • Explores advanced topics like Bayesian neural networks and neural scaling laws The book takes a unique approach, framing neural networks as compositions of differentiable primitives rather than biological analogs. It provides both theoretical insights and practical coding examples. I especially enjoyed the sections on: • Vector-Jacobian products and reverse-mode autodiff • Stochastic gradient descent and mini-batch optimization • ReLU, GELU, and other modern activation functions • Universal approximation capabilities of MLPs Whether you're new to deep learning or an experienced practitioner, this book offers valuable insights into the fundamentals and latest developments. Highly recommended for anyone working with neural networks!
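As a small taste of the vector-Jacobian product topic mentioned above, here is a short PyTorch illustration of reverse-mode autodiff; the function f and the cotangent vector v are arbitrary examples chosen here, not taken from the book:

```python
# Reverse-mode autodiff computes vector-Jacobian products: backward() with a
# cotangent vector v accumulates J(x)^T v into x.grad, without building the Jacobian.
import torch

def f(x):
    # Example vector-valued function R^3 -> R^3 (arbitrary choice)
    return torch.stack([x[0] * x[1], x[1] + x[2], x[2] ** 2])

x = torch.tensor([1.0, 2.0, 3.0], requires_grad=True)
v = torch.tensor([1.0, 0.5, -1.0])   # cotangent (output-side) vector

y = f(x)
y.backward(v)                        # computes J(x)^T v
print(x.grad)                        # tensor([ 2.0000,  1.5000, -5.5000])
```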
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/662bf5bfe93bb73804ef9344/WXYLnjjJ4SROkoveIi7If.png", "fullname": "Kuldeep Singh Sidhu", "name": "singhsidhukuldeep", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 219, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/662bf5bfe93bb73804ef9344/5749-lMRBO6mdzYB6r1HV.jpeg" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "RogerioFreitas", "Fik", "louisbrulenaudet" ], "count": 4 }, { "reaction": "๐Ÿ‘", "users": [ "fsommers", "Fik", "Zmu", "StevenTu" ], "count": 4 }, { "reaction": "โค๏ธ", "users": [ "RogerioFreitas", "Fik", "allknowingroger" ], "count": 3 }, { "reaction": "๐Ÿ˜Ž", "users": [ "Fik" ], "count": 1 }, { "reaction": "๐Ÿ”ฅ", "users": [ "marielena42" ], "count": 1 } ]
2024-09-22T09:37:22.000Z
2024-09-22T09:37:22.135Z
[]
/posts/singhsidhukuldeep/418630943924160
2,190
0
919202074510190
[ { "type": "text", "value": "new diffusion space just dropped", "raw": "new diffusion space just dropped", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/nroggendorff/flux-lora-tester", "href": null, "resource": { "type": "space", "id": "nroggendorff/flux-lora-tester", "discussionNum": null }, "url": "https://huggingface.co/spaces/nroggendorff/flux-lora-tester", "code": null, "user": null, "label": null, "lang": null } ]
new diffusion space just dropped https://huggingface.co/spaces/nroggendorff/flux-lora-tester
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/659f000b83abded48e190901/BnXL_XYbVX6PHngfQLECW.png", "fullname": "Noa Roggendorff", "name": "nroggendorff", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 141, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/659f000b83abded48e190901/ox5cy-Sr7rOIQRTQCAMvz.png" } ]
[]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "John6666", "nroggendorff", "pepper13", "b1ka", "louisbrulenaudet", "julien-c", "enzostvs" ], "count": 7 }, { "reaction": "โค๏ธ", "users": [ "John6666", "nroggendorff", "pepper13", "julien-c" ], "count": 4 }, { "reaction": "๐Ÿ‘", "users": [ "John6666", "julien-c" ], "count": 2 }, { "reaction": "๐Ÿ‘€", "users": [ "pepper13", "julien-c" ], "count": 2 } ]
2024-09-22T06:58:55.000Z
2024-09-22T06:58:55.727Z
[]
/posts/nroggendorff/919202074510190
1,912
0
492719281207772
[ { "type": "text", "value": "Last Week in Medical AI: Top Research ", "raw": "Last Week in Medical AI: Top Research ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Papers/Models", "raw": "Papers/Models", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ…(September 14 - September 21, 2024)", "raw": "๐Ÿ…(September 14 - September 21, 2024)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ… Medical AI Paper of the Week", "raw": "๐Ÿ… Medical AI Paper of the Week", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- How to Build the Virtual Cell with Artificial Intelligence: Priorities and Opportunities", "raw": "- How to Build the Virtual Cell with Artificial Intelligence: Priorities and Opportunities", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Medical LLM & Other Models", "raw": "Medical LLM & Other Models", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- GP-GPT: LLMs for Gene-Phenotype Mapping", "raw": "- GP-GPT: LLMs for Gene-Phenotype Mapping", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- HuatuoGPT-II, 1-stage Training for Medical LLMs", "raw": "- HuatuoGPT-II, 1-stage Training for Medical LLMs", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- HuatuoGPT-Vision: Multimodal Medical LLMs", "raw": "- HuatuoGPT-Vision: Multimodal Medical LLMs", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, 
"user": null, "label": null, "lang": null }, { "type": "text", "value": "- Apollo: A Lightweight Multilingual Medical LLM", "raw": "- Apollo: A Lightweight Multilingual Medical LLM", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- GMISeg: General Medical Image Segmentation", "raw": "- GMISeg: General Medical Image Segmentation", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Frameworks and Methodologies", "raw": "Frameworks and Methodologies", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- CoD: Chain of Diagnosis for Medical Agents", "raw": "- CoD: Chain of Diagnosis for Medical Agents", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- How to Build the Virtual Cell with AI", "raw": "- How to Build the Virtual Cell with AI", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Interpretable Visual Concept Discovery with SAM", "raw": "- Interpretable Visual Concept Discovery with SAM", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Aligning Human Knowledge for Explainable Med Image", "raw": "- Aligning Human Knowledge for Explainable Med Image", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- ReXErr: Synthetic Errors in Radiology Reports", "raw": "- ReXErr: Synthetic Errors in Radiology Reports", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Veridical Data Science for Medical Foundation Models", "raw": "- Veridical Data Science for Medical Foundation Models", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, 
{ "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Fine Tuning LLMs for Medicine: The Role of DPO", "raw": "- Fine Tuning LLMs for Medicine: The Role of DPO", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Clinical Trials:", "raw": "Clinical Trials:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- LLMs to Generate Clinical Trial Tables and Figures", "raw": "- LLMs to Generate Clinical Trial Tables and Figures", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- LLMs for Clinical Report Correction", "raw": "- LLMs for Clinical Report Correction", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- AlpaPICO: LLMs for Clinical Trial PICO Frames", "raw": "- AlpaPICO: LLMs for Clinical Trial PICO Frames", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Medical LLM Applications", "raw": "Medical LLM Applications", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Microsoft's Learnings of Large-Scale Bot Deployment in Medical", "raw": "- Microsoft's Learnings of Large-Scale Bot Deployment in Medical", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "....", "raw": "....", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, 
"user": null, "label": null, "lang": null }, { "type": "text", "value": "Check the full thread: ", "raw": "Check the full thread: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://x.com/OpenlifesciAI/status/1837688406014300514", "href": "https://x.com/OpenlifesciAI/status/1837688406014300514", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Thank you for your continued support and love for this series! Stay up-to-date with weekly updates on Medical LLMs, datasets, and top research papers by following ", "raw": "Thank you for your continued support and love for this series! Stay up-to-date with weekly updates on Medical LLMs, datasets, and top research papers by following ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@aaditya", "href": null, "resource": null, "url": null, "code": null, "user": "aaditya", "label": null, "lang": null }, { "type": "text", "value": " ๐Ÿค—", "raw": " ๐Ÿค—", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Last Week in Medical AI: Top Research Papers/Models 🏅(September 14 - September 21, 2024) 🏅 Medical AI Paper of the Week - How to Build the Virtual Cell with Artificial Intelligence: Priorities and Opportunities Medical LLM & Other Models - GP-GPT: LLMs for Gene-Phenotype Mapping - HuatuoGPT-II, 1-stage Training for Medical LLMs - HuatuoGPT-Vision: Multimodal Medical LLMs - Apollo: A Lightweight Multilingual Medical LLM - GMISeg: General Medical Image Segmentation Frameworks and Methodologies - CoD: Chain of Diagnosis for Medical Agents - How to Build the Virtual Cell with AI - Interpretable Visual Concept Discovery with SAM - Aligning Human Knowledge for Explainable Med Image - ReXErr: Synthetic Errors in Radiology Reports - Veridical Data Science for Medical Foundation Models - Fine Tuning LLMs for Medicine: The Role of DPO Clinical Trials: - LLMs to Generate Clinical Trial Tables and Figures - LLMs for Clinical Report Correction - AlpaPICO: LLMs for Clinical Trial PICO Frames Medical LLM Applications - Microsoft's Learnings of Large-Scale Bot Deployment in Medical .... Check the full thread: https://x.com/OpenlifesciAI/status/1837688406014300514 Thank you for your continued support and love for this series! Stay up-to-date with weekly updates on Medical LLMs, datasets, and top research papers by following @aaditya 🤗
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5f3fe13d79c1ba4c353d0c19/XswyGe3OtOdZ6g7rnrgfc.png", "fullname": "Aaditya Ura", "name": "aaditya", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 224, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/QEt_GCLQGa4LGe6CffaxG.jpeg" } ]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5f3fe13d79c1ba4c353d0c19/XswyGe3OtOdZ6g7rnrgfc.png", "fullname": "Aaditya Ura", "name": "aaditya", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 224 } ]
[ { "reaction": "๐Ÿš€", "users": [ "aaditya", "John6666", "shawon", "junyeong-nero", "osanseviero", "AdamLucek" ], "count": 6 }, { "reaction": "๐Ÿ”ฅ", "users": [ "aaditya", "osanseviero", "lurker18" ], "count": 3 }, { "reaction": "๐Ÿ‘", "users": [ "aaditya", "lurker18" ], "count": 2 }, { "reaction": "โค๏ธ", "users": [ "aaditya" ], "count": 1 }, { "reaction": "๐Ÿค—", "users": [ "aaditya" ], "count": 1 } ]
2024-09-22T04:18:43.000Z
2024-09-22T04:20:09.431Z
[]
/posts/aaditya/492719281207772
2,136
0
121463442647977
[ { "type": "text", "value": "I have done an extensive multi-GPU FLUX Full Fine Tuning / DreamBooth training experimentation on RunPod by using 2x A100โ€“80 GB GPUs (PCIe) since this was commonly asked of me.", "raw": "I have done an extensive multi-GPU FLUX Full Fine Tuning / DreamBooth training experimentation on RunPod by using 2x A100โ€“80 GB GPUs (PCIe) since this was commonly asked of me.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Full article here : ", "raw": "Full article here : ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://medium.com/@furkangozukara/multi-gpu-flux-fu", "href": "https://medium.com/@furkangozukara/multi-gpu-flux-fu", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Image 1", "raw": "Image 1", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Image 1 shows that only first part of installation of Kohya GUI took 30 minutes on a such powerful machine on a very expensive Secure Cloud pod โ€” 3.28 USD per hour", "raw": "Image 1 shows that only first part of installation of Kohya GUI took 30 minutes on a such powerful machine on a very expensive Secure Cloud pod โ€” 3.28 USD per hour", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "There was also part 2, so just installation took super time", "raw": "There was also part 2, so just installation took super time", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "On Massed Compute, it would take like 2โ€“3 minutes", "raw": "On Massed Compute, it would take like 2โ€“3 minutes", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This is why I suggest you to use Massed Compute over RunPod, RunPod machines have terrible hard disk speeds and they are like lottery to get good ones", "raw": "This is why I suggest you to use Massed 
Compute over RunPod, RunPod machines have terrible hard disk speeds and they are like lottery to get good ones", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Image 2, 3 and 4", "raw": "Image 2, 3 and 4", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Image 2 shows speed of our very best config FLUX Fine Tuning training shared below when doing 2x Multi GPU training", "raw": "Image 2 shows speed of our very best config FLUX Fine Tuning training shared below when doing 2x Multi GPU training", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.patreon.com/posts/kohya-flux-fine-112099700", "href": "https://www.patreon.com/posts/kohya-flux-fine-112099700", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Used config name is : Quality_1_27500MB_6_26_Second_IT.json", "raw": "Used config name is : Quality_1_27500MB_6_26_Second_IT.json", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Image 3 shows VRAM usage of this config when doing 2x Multi GPU training", "raw": "Image 3 shows VRAM usage of this config when doing 2x Multi GPU training", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Image 4 shows the GPUs of the Pod", "raw": "Image 4 shows the GPUs of the Pod", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, 
"resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Image 5 and 6", "raw": "Image 5 and 6", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Image 5 shows speed of our very best config FLUX Fine Tuning training shared below when doing a single GPU training", "raw": "Image 5 shows speed of our very best config FLUX Fine Tuning training shared below when doing a single GPU training", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.patreon.com/posts/kohya-flux-fine-112099700", "href": "https://www.patreon.com/posts/kohya-flux-fine-112099700", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Used config name is : Quality_1_27500MB_6_26_Second_IT.json", "raw": "Used config name is : Quality_1_27500MB_6_26_Second_IT.json", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Image 6 shows this setup used VRAM amount", "raw": "Image 6 shows this setup used VRAM amount", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Image 7 and 8", "raw": "Image 7 and 8", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Image 7 shows speed of our very best config FLUX Fine Tuning training shared below when doing a single GPU training and Gradient Checkpointing is disabled", "raw": "Image 7 shows speed of our very best config FLUX Fine Tuning training shared below when doing a single GPU training and Gradient Checkpointing is disabled", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.patreon.com/posts/kohya-flux-fine-112099700", "href": 
"https://www.patreon.com/posts/kohya-flux-fine-112099700", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Used config name is : Quality_1_27500MB_6_26_Second_IT.json", "raw": "Used config name is : Quality_1_27500MB_6_26_Second_IT.json", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Image 8 shows this setup used VRAM amount", "raw": "Image 8 shows this setup used VRAM amount", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "....", "raw": "....", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
I have done an extensive multi-GPU FLUX Full Fine Tuning / DreamBooth training experiment on RunPod using 2x A100–80 GB GPUs (PCIe), since this was commonly asked of me.

Full article here : https://medium.com/@furkangozukara/multi-gpu-flux-fu

Image 1
Image 1 shows that just the first part of the Kohya GUI installation took 30 minutes on such a powerful machine, on a very expensive Secure Cloud pod at 3.28 USD per hour
There was also a part 2, so the installation alone took a very long time
On Massed Compute, it would take around 2–3 minutes
This is why I suggest using Massed Compute over RunPod; RunPod machines have terrible hard disk speeds and getting a good one is a lottery



Image 2, 3 and 4
Image 2 shows the speed of our very best FLUX Fine Tuning training config, shared below, when doing 2x multi-GPU training
https://www.patreon.com/posts/kohya-flux-fine-112099700
Used config name is : Quality_1_27500MB_6_26_Second_IT.json
Image 3 shows the VRAM usage of this config when doing 2x multi-GPU training
Image 4 shows the GPUs of the Pod



Image 5 and 6
Image 5 shows the speed of our very best FLUX Fine Tuning training config, shared below, when doing single-GPU training
https://www.patreon.com/posts/kohya-flux-fine-112099700
Used config name is : Quality_1_27500MB_6_26_Second_IT.json
Image 6 shows the amount of VRAM this setup used



Image 7 and 8
Image 7 shows the speed of our very best FLUX Fine Tuning training config, shared below, when doing single-GPU training with Gradient Checkpointing disabled
https://www.patreon.com/posts/kohya-flux-fine-112099700
Used config name is : Quality_1_27500MB_6_26_Second_IT.json
Image 8 shows the amount of VRAM this setup used

....
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1672531901326-6345bd89fe134dfd7a0dba40.png", "fullname": "Furkan Gรถzรผkara", "name": "MonsterMMORPG", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 376, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/ujeSIuJileACMWeD0ecCu.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/ueGCz-Xvvt1MXA-c2RPkc.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/v0PhDAk0mo-7vQMirzr_g.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/STPvPW7rMNsZaKv94gr-_.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/_RJkMqQeKrB6PXQmx2zMY.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/GuvUGYyNWW7qnKSk0nIi6.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/2Fq0eJ-ZyICczag7Iyvmd.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/5GdE6AhKGjcmTN6srhA2E.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/Q9fWl4g3qU6Xu_idixc7D.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/-RXiamLcvhLb9JKf4Kawi.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/KqVoTTPFzbyKNrvNYyOPv.png" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "MonsterMMORPG", "John6666", "CodeCatchLLC", "Maykeye" ], "count": 4 }, { "reaction": "๐Ÿ‘", "users": [ "MonsterMMORPG", "VDBLOI2024" ], "count": 2 }, { "reaction": "๐Ÿ”ฅ", "users": [ "MonsterMMORPG" ], "count": 1 }, { "reaction": "๐Ÿš€", "users": [ "MonsterMMORPG" ], "count": 1 }, { "reaction": "โค๏ธ", "users": [ "MonsterMMORPG" ], "count": 1 }, { "reaction": "๐Ÿค—", "users": [ "MonsterMMORPG" ], "count": 1 }, { "reaction": "๐Ÿ˜Ž", "users": [ "MonsterMMORPG" ], "count": 1 }, { "reaction": "โž•", "users": [ "MonsterMMORPG" ], "count": 1 }, { "reaction": "๐Ÿง ", "users": [ "MonsterMMORPG" ], "count": 1 }, { "reaction": "๐Ÿค", "users": [ "MonsterMMORPG" ], "count": 1 }, { "reaction": "๐Ÿคฏ", "users": [ "MonsterMMORPG" ], "count": 1 } ]
2024-09-21T16:47:40.000Z
2024-09-21T16:47:40.317Z
[]
/posts/MonsterMMORPG/121463442647977
2,421
0
233163499510576
[ { "type": "text", "value": "Good folks at ", "raw": "Good folks at ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@nvidia", "href": null, "resource": null, "url": null, "code": null, "user": "nvidia", "label": null, "lang": null }, { "type": "text", "value": " have just released NVLM 1.0, a family of frontier-class multimodal large language models that achieve state-of-the-art results across vision-language tasks.", "raw": " have just released NVLM 1.0, a family of frontier-class multimodal large language models that achieve state-of-the-art results across vision-language tasks.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Here is how they did it:", "raw": "Here is how they did it:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1. Model Architecture Design:", "raw": "1. Model Architecture Design:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Developed three model architectures:", "raw": "- Developed three model architectures:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "a) NVLM-D: Decoder-only architecture", "raw": "a) NVLM-D: Decoder-only architecture", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "b) NVLM-X: Cross-attention-based architecture", "raw": "b) NVLM-X: Cross-attention-based architecture", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "c) NVLM-H: Novel hybrid architecture", "raw": "c) NVLM-H: Novel hybrid architecture", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": 
null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "2. Vision Encoder:", "raw": "2. Vision Encoder:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Used InternViT-6B-448px-V1-5 as the vision encoder", "raw": "- Used InternViT-6B-448px-V1-5 as the vision encoder", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Implemented dynamic high-resolution (DHR) input handling", "raw": "- Implemented dynamic high-resolution (DHR) input handling", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "3. Language Model:", "raw": "3. Language Model:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Used Qwen2-72B-Instruct as the base LLM", "raw": "- Used Qwen2-72B-Instruct as the base LLM", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "4. Training Data Curation:", "raw": "4. 
Training Data Curation:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Carefully curated high-quality pretraining and supervised fine-tuning datasets", "raw": "- Carefully curated high-quality pretraining and supervised fine-tuning datasets", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Included diverse task-oriented datasets for various capabilities", "raw": "- Included diverse task-oriented datasets for various capabilities", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "5. Pretraining:", "raw": "5. Pretraining:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Froze LLM and vision encoder", "raw": "- Froze LLM and vision encoder", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Trained only modality-alignment modules (e.g., MLP projector, cross-attention layers)", "raw": "- Trained only modality-alignment modules (e.g., MLP projector, cross-attention layers)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Used a large batch size of 2048", "raw": "- Used a large batch size of 2048", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "6. Supervised Fine-Tuning (SFT):", "raw": "6. 
Supervised Fine-Tuning (SFT):", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Unfroze LLM while keeping the vision encoder frozen", "raw": "- Unfroze LLM while keeping the vision encoder frozen", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Trained on multimodal SFT datasets and high-quality text-only SFT data", "raw": "- Trained on multimodal SFT datasets and high-quality text-only SFT data", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Implemented 1-D tile tagging for dynamic high-resolution inputs", "raw": "- Implemented 1-D tile tagging for dynamic high-resolution inputs", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "7. Evaluation:", "raw": "7. Evaluation:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Evaluated on multiple vision-language benchmarks", "raw": "- Evaluated on multiple vision-language benchmarks", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Compared performance to leading proprietary and open-source models", "raw": "- Compared performance to leading proprietary and open-source models", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "8. Optimization:", "raw": "8. 
Optimization:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Iterated on model designs and training approaches", "raw": "- Iterated on model designs and training approaches", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Used smaller 34B models for faster experimentation before scaling to 72B", "raw": "- Used smaller 34B models for faster experimentation before scaling to 72B", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "9. Now comes the best part...Open-Sourcing:", "raw": "9. Now comes the best part...Open-Sourcing:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Released model weights and full technical details to the research community", "raw": "- Released model weights and full technical details to the research community", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The paper provides fascinating insights into architecture design, training data curation, and achieving production-grade multimodality. A must-read for anyone working on multimodal AI!", "raw": "The paper provides fascinating insights into architecture design, training data curation, and achieving production-grade multimodality. A must-read for anyone working on multimodal AI!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Good folks at @nvidia have just released NVLM 1.0, a family of frontier-class multimodal large language models that achieve state-of-the-art results across vision-language tasks. Here is how they did it: 1. Model Architecture Design: - Developed three model architectures: a) NVLM-D: Decoder-only architecture b) NVLM-X: Cross-attention-based architecture c) NVLM-H: Novel hybrid architecture 2. Vision Encoder: - Used InternViT-6B-448px-V1-5 as the vision encoder - Implemented dynamic high-resolution (DHR) input handling 3. Language Model: - Used Qwen2-72B-Instruct as the base LLM 4. Training Data Curation: - Carefully curated high-quality pretraining and supervised fine-tuning datasets - Included diverse task-oriented datasets for various capabilities 5. Pretraining: - Froze LLM and vision encoder - Trained only modality-alignment modules (e.g., MLP projector, cross-attention layers) - Used a large batch size of 2048 6. Supervised Fine-Tuning (SFT): - Unfroze LLM while keeping the vision encoder frozen - Trained on multimodal SFT datasets and high-quality text-only SFT data - Implemented 1-D tile tagging for dynamic high-resolution inputs 7. Evaluation: - Evaluated on multiple vision-language benchmarks - Compared performance to leading proprietary and open-source models 8. Optimization: - Iterated on model designs and training approaches - Used smaller 34B models for faster experimentation before scaling to 72B 9. Now comes the best part...Open-Sourcing: - Released model weights and full technical details to the research community The paper provides fascinating insights into architecture design, training data curation, and achieving production-grade multimodality. A must-read for anyone working on multimodal AI!
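To make the training recipe in steps 5 and 6 concrete, here is a minimal PyTorch-style sketch of the freezing schedule described above. The modules, sizes, and learning rates are placeholders invented purely for illustration; this is not the NVLM code.

```python
import torch
import torch.nn as nn

# Tiny stand-ins for the real components (vision encoder ~ InternViT-6B,
# LLM ~ Qwen2-72B-Instruct, projector ~ the modality-alignment MLP).
# Sizes and learning rates are made up purely for illustration.
vision_encoder = nn.Linear(1024, 4096)
projector = nn.Sequential(nn.Linear(4096, 8192), nn.GELU(), nn.Linear(8192, 8192))
llm = nn.Linear(8192, 8192)

def freeze(module: nn.Module) -> None:
    for p in module.parameters():
        p.requires_grad = False

# Stage 1 (pretraining): freeze the LLM and vision encoder, train only the
# modality-alignment module.
freeze(vision_encoder)
freeze(llm)
stage1_opt = torch.optim.AdamW(projector.parameters(), lr=1e-4)

# Stage 2 (SFT): unfreeze the LLM, keep the vision encoder frozen.
for p in llm.parameters():
    p.requires_grad = True
stage2_opt = torch.optim.AdamW(
    list(projector.parameters()) + list(llm.parameters()), lr=2e-5
)
```

The practical point is that stage 1 only ever updates the alignment module, so the expensive LLM and vision encoder stay fixed until SFT.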
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/662bf5bfe93bb73804ef9344/WXYLnjjJ4SROkoveIi7If.png", "fullname": "Kuldeep Singh Sidhu", "name": "singhsidhukuldeep", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 219, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/662bf5bfe93bb73804ef9344/3nO4Zakqlu2vy5y8HbCIX.jpeg" } ]
[]
[ { "reaction": "๐Ÿ‘", "users": [ "Thunderbird2410", "TacImpulse", "osanseviero", "AINovice2005" ], "count": 4 }, { "reaction": "๐Ÿ‘€", "users": [ "John6666", "louisbrulenaudet" ], "count": 2 } ]
2024-09-21T06:40:28.000Z
2024-09-21T06:40:28.056Z
[]
/posts/singhsidhukuldeep/233163499510576
1,689
0
463740600226688
[ { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "code_fence", "value": null, "raw": "```\ndef i_lo(required_argument):\n print('!!')\n\nve_ = lambda: i_lo('<3')\nve_()\nve_()\nve_()\nve_()\n```", "href": null, "resource": null, "url": null, "code": "def i_lo(required_argument):\n print('!!')\n\nve_ = lambda: i_lo('<3')\nve_()\nve_()\nve_()\nve_()", "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
``` def i_lo(required_argument): print('!!') ve_ = lambda: i_lo('<3') ve_() ve_() ve_() ve_() ```
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/659f000b83abded48e190901/BnXL_XYbVX6PHngfQLECW.png", "fullname": "Noa Roggendorff", "name": "nroggendorff", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 141, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "Smorty100" ], "count": 2 } ]
2024-09-21T02:27:07.000Z
2024-09-21T02:27:07.590Z
[]
/posts/nroggendorff/463740600226688
1,077
0
510052239181716
[ { "type": "text", "value": "I am developing a personal project to further support and help people living with Depression and Anxiety. As I suffer mainly from chronic depression I would like to create a tool based on AI that can monitor my moods but first I will collect information about myself, my moods and after collecting at least 6 months of my moods and my writings I will be able to formulate as a kind of recognition when my emotions are โ€œout of controlโ€ I mean those states or feelings of emptiness. I think that sometimes not all of us have access to treatments and therapies so I would like to develop in a free way this project that I have just started today. I have already started the code to register events of my moods. I will share with you the updates :D ", "raw": "I am developing a personal project to further support and help people living with Depression and Anxiety. As I suffer mainly from chronic depression I would like to create a tool based on AI that can monitor my moods but first I will collect information about myself, my moods and after collecting at least 6 months of my moods and my writings I will be able to formulate as a kind of recognition when my emotions are โ€œout of controlโ€ I mean those states or feelings of emptiness. I think that sometimes not all of us have access to treatments and therapies so I would like to develop in a free way this project that I have just started today. I have already started the code to register events of my moods. I will share with you the updates :D ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "code_fence", "value": null, "raw": "```\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.metrics import accuracy_score, classification_report\nimport nltk\nfrom nltk.corpus import stopwords\nimport string\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\n\nnltk.download('stopwords')\n\ndata = {\n 'text': [\n \"Hoy me siento bien, aunque un poco cansado\", \n \"Me siento triste y solo\", \n \"Esto es frustrante, todo sale mal\", \n \"Estoy nervioso por lo que va a pasar\",\n \"No puedo con este estrรฉs\", \n \"Todo estรก saliendo bien, me siento optimista\", \n \"Siento miedo de lo que pueda suceder\", \n \"Hoy fue un dรญa horrible\"\n ],\n 'emotion': [\n 'felicidad', \n 'tristeza', \n 'enojo', \n 'ansiedad', \n 'ansiedad', \n 'felicidad', \n 'miedo', \n 'tristeza'\n ]\n}\n\ndf = pd.DataFrame(data)\n\n# Funciรณn para limpiar el texto\ndef clean_text(text):\n```", "href": null, "resource": null, "url": null, "code": "import pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.metrics import accuracy_score, classification_report\nimport nltk\nfrom nltk.corpus import stopwords\nimport string\nimport matplotlib.pyplot as 
plt\nfrom datetime import datetime\n\nnltk.download('stopwords')\n\ndata = {\n 'text': [\n \"Hoy me siento bien, aunque un poco cansado\", \n \"Me siento triste y solo\", \n \"Esto es frustrante, todo sale mal\", \n \"Estoy nervioso por lo que va a pasar\",\n \"No puedo con este estrรฉs\", \n \"Todo estรก saliendo bien, me siento optimista\", \n \"Siento miedo de lo que pueda suceder\", \n \"Hoy fue un dรญa horrible\"\n ],\n 'emotion': [\n 'felicidad', \n 'tristeza', \n 'enojo', \n 'ansiedad', \n 'ansiedad', \n 'felicidad', \n 'miedo', \n 'tristeza'\n ]\n}\n\ndf = pd.DataFrame(data)\n\n# Funciรณn para limpiar el texto\ndef clean_text(text):", "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Yes, I speak Spanish :P too ", "raw": "Yes, I speak Spanish :P too ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
I am developing a personal project to further support and help people living with Depression and Anxiety. As I suffer mainly from chronic depression, I would like to create an AI-based tool that can monitor my moods. First I will collect information about myself and my moods, and after gathering at least 6 months of mood logs and writings I should be able to build a kind of recognition for when my emotions are "out of control", by which I mean those states or feelings of emptiness. I think that not all of us always have access to treatments and therapies, so I would like to develop this project, which I have just started today, for free. I have already started the code to register my mood events. I will share the updates with you :D



```
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import accuracy_score, classification_report
import nltk
from nltk.corpus import stopwords
import string
import matplotlib.pyplot as plt
from datetime import datetime

nltk.download('stopwords')

data = {
    'text': [
        "Hoy me siento bien, aunque un poco cansado",
        "Me siento triste y solo",
        "Esto es frustrante, todo sale mal",
        "Estoy nervioso por lo que va a pasar",
        "No puedo con este estrés",
        "Todo está saliendo bien, me siento optimista",
        "Siento miedo de lo que pueda suceder",
        "Hoy fue un día horrible"
    ],
    'emotion': [
        'felicidad',
        'tristeza',
        'enojo',
        'ansiedad',
        'ansiedad',
        'felicidad',
        'miedo',
        'tristeza'
    ]
}

df = pd.DataFrame(data)

# Function to clean the text
def clean_text(text):
```
Yes, I speak Spanish :P too 
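Since the snippet above stops at `clean_text`, here is one hedged guess at how the pipeline could continue, reusing the imports and `df` already defined: lowercase and strip punctuation, drop Spanish stopwords, then train the Naive Bayes classifier that was imported. This is only a sketch of where the project seems to be heading, not the author's final code.

```python
def clean_text(text):
    # Lowercase, strip punctuation, and drop Spanish stopwords.
    text = text.lower()
    text = text.translate(str.maketrans('', '', string.punctuation))
    stop_words = set(stopwords.words('spanish'))
    return ' '.join(word for word in text.split() if word not in stop_words)

df['clean_text'] = df['text'].apply(clean_text)

# Bag-of-words features plus the Naive Bayes classifier imported above.
X_train, X_test, y_train, y_test = train_test_split(
    df['clean_text'], df['emotion'], test_size=0.25, random_state=42
)
vectorizer = CountVectorizer()
X_train_vec = vectorizer.fit_transform(X_train)
X_test_vec = vectorizer.transform(X_test)

model = MultinomialNB()
model.fit(X_train_vec, y_train)
predictions = model.predict(X_test_vec)

print(accuracy_score(y_test, predictions))
print(classification_report(y_test, predictions, zero_division=0))
```

With only eight example sentences the reported metrics are meaningless; the point is the shape of the pipeline once six months of real entries exist.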
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6636ddc8e036475309d0f8b7/XLHvRCjxMtFEWzvWKYewA.png", "fullname": "Messmerbox", "name": "loztcontrol", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 6, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "foilmang", "Ardvark123" ], "count": 3 }, { "reaction": "๐Ÿ‘", "users": [ "k-young", "Ardvark123" ], "count": 2 }, { "reaction": "๐Ÿค—", "users": [ "diwank", "mkemka" ], "count": 2 } ]
2024-09-21T00:09:36.000Z
2024-09-21T10:20:31.968Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6636ddc8e036475309d0f8b7/XLHvRCjxMtFEWzvWKYewA.png", "fullname": "Messmerbox", "name": "loztcontrol", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 6, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65a76f3cac3a06f3e8bdf9f5/hYTFOYj1Pca0ZOugSE42o.jpeg", "fullname": "Pankaj Singh", "name": "Pankaj8922", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2, "isFollowing": false } ]
/posts/loztcontrol/510052239181716
1,685
3
737548646425937
[ { "type": "text", "value": "Weโ€™ve open-sourced an app, powered by SambaNova Cloud and Llama 405B, that intelligently detects when a web search is neededโ€”then answers directly or with RAG.", "raw": "Weโ€™ve open-sourced an app, powered by SambaNova Cloud and Llama 405B, that intelligently detects when a web search is neededโ€”then answers directly or with RAG.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/sambanovasystems/auto-web-search", "href": null, "resource": { "type": "space", "id": "sambanovasystems/auto-web-search", "discussionNum": null }, "url": "https://huggingface.co/spaces/sambanovasystems/auto-web-search", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿฅš A hidden Easter egg is that Auto Search detection is already trained into Llama 3.1 checkpoints. Simply use the tool usage system prompt below, and the model will either respond with a web search query if it deems necessary or respond to the query directly.๐Ÿฅš", "raw": "๐Ÿฅš A hidden Easter egg is that Auto Search detection is already trained into Llama 3.1 checkpoints. 
Simply use the tool usage system prompt below, and the model will either respond with a web search query if it deems necessary or respond to the query directly.๐Ÿฅš", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Environment: IPython", "raw": "Environment: IPython", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Tools: Brave Search", "raw": "Tools: Brave Search", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Knowledge Cutoff Date: December 2023", "raw": "Knowledge Cutoff Date: December 2023", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Today's Date: September 2024", "raw": "Today's Date: September 2024", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "You are a helpful assistant. Reminder:", "raw": "You are a helpful assistant. 
Reminder:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Search function calls MUST follow the specified format: \"brave_search.call(query)\"", "raw": "Search function calls MUST follow the specified format: \"brave_search.call(query)\"", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "You can see the documentation here", "raw": "You can see the documentation here", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.llama.com/docs/model-cards-and-prompt-formats/llama3_1#built-in-tooling", "href": "https://www.llama.com/docs/model-cards-and-prompt-formats/llama3_1#built-in-tooling", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "and read about how the tool usage was trained into Llama3.1 models in section 4.3.5 here ", "raw": "and read about how the tool usage was trained into Llama3.1 models in section 4.3.5 here ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://arxiv.org/pdf/2407.21783", "href": "https://arxiv.org/pdf/2407.21783", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
We've open-sourced an app, powered by SambaNova Cloud and Llama 405B, that intelligently detects when a web search is needed, then answers directly or with RAG.

https://huggingface.co/spaces/sambanovasystems/auto-web-search

🥚 A hidden Easter egg is that Auto Search detection is already trained into Llama 3.1 checkpoints. Simply use the tool usage system prompt below, and the model will either respond with a web search query if it deems necessary or respond to the query directly. 🥚

Environment: IPython
Tools: Brave Search
Knowledge Cutoff Date: December 2023
Today's Date: September 2024
You are a helpful assistant. Reminder:
Search function calls MUST follow the specified format: "brave_search.call(query)"

You can see the documentation here
https://www.llama.com/docs/model-cards-and-prompt-formats/llama3_1#built-in-tooling
and read about how the tool usage was trained into Llama3.1 models in section 4.3.5 here https://arxiv.org/pdf/2407.21783
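For anyone who wants to reproduce the Easter egg outside the Space, here is a hedged sketch using an OpenAI-compatible client: send the tool-usage system prompt quoted above and check whether the model replies with a `brave_search.call(...)` query or answers directly. The base URL and model name are assumptions; substitute whatever endpoint and Llama 3.1 checkpoint you actually have access to.

```python
from openai import OpenAI

# Assumed endpoint and model id; replace with your own provider and checkpoint.
client = OpenAI(base_url="https://api.sambanova.ai/v1", api_key="YOUR_KEY")
MODEL = "Meta-Llama-3.1-405B-Instruct"

SYSTEM_PROMPT = (
    "Environment: IPython\n"
    "Tools: Brave Search\n"
    "Knowledge Cutoff Date: December 2023\n"
    "Today's Date: September 2024\n"
    "You are a helpful assistant. Reminder:\n"
    'Search function calls MUST follow the specified format: "brave_search.call(query)"'
)

def route(question: str) -> dict:
    reply = client.chat.completions.create(
        model=MODEL,
        messages=[
            {"role": "system", "content": SYSTEM_PROMPT},
            {"role": "user", "content": question},
        ],
    ).choices[0].message.content or ""
    # If the model decided a search is needed, it emits a brave_search.call(...)
    # string; otherwise it just answers the question directly.
    if "brave_search.call(" in reply:
        query = reply.split("brave_search.call(", 1)[1].rsplit(")", 1)[0]
        return {"needs_search": True, "query": query}
    return {"needs_search": False, "answer": reply}

print(route("Who won the most recent Formula 1 race?"))
print(route("What is the capital of France?"))
```

In the full app the extracted query would then be sent to a real search backend and the results fed back to the model for a RAG-style answer.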
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/zxdZvpuAP6qEhk3vyRO3_.jpeg", "fullname": "Zoltan Csaki", "name": "zolicsaki", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 30, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "megoyaw3", "John6666", "djuna" ], "count": 3 } ]
2024-09-20T20:27:52.000Z
2024-09-20T20:28:18.092Z
[]
/posts/zolicsaki/737548646425937
1,247
0
902559059786872
[ { "type": "text", "value": "!!SEE UPDATE BELOW!!", "raw": "!!SEE UPDATE BELOW!!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I don't know who still needs to hear this, but if you're using Mistral Nemo-based models, you might have been using the wrong completions format. This is a signal boost from MarinaraSpaghetti's model card for NemoMix-Unleashed: ", "raw": "I don't know who still needs to hear this, but if you're using Mistral Nemo-based models, you might have been using the wrong completions format. This is a signal boost from MarinaraSpaghetti's model card for NemoMix-Unleashed: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/MarinaraSpaghetti/NemoMix-Unleashed-12B#instruct", "href": null, "resource": { "type": "model", "id": "MarinaraSpaghetti/NemoMix-Unleashed-12B", "discussionNum": null }, "url": "https://huggingface.co/MarinaraSpaghetti/NemoMix-Unleashed-12B#instruct", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "A lot of people have been working with a version of Nemo that's been reconfigured for ChatML, and while that works great, simply using the right format might be just as effective at correcting weirdness people in the AIRP scene sometimes have with Nemo.", "raw": "A lot of people have been working with a version of Nemo that's been reconfigured for ChatML, and while that works great, simply using the right format might be just as effective at correcting weirdness people in the AIRP scene sometimes have with Nemo.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Huge ups to Marinara for pointing this out, and to the MistralAI team member who let her know.", "raw": "Huge ups to Marinara for pointing this out, and to the MistralAI team member who let her know.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Update: A PR has been merged to SillyTavern Staging with new corrected templates! If you don't want to switch or wait, I put them up on GitHub: ", "raw": "Update: A PR has been merged to SillyTavern Staging with new corrected templates! 
If you don't want to switch or wait, I put them up on GitHub: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/inflatebot/SillyTavern-Mistral-Templates", "href": "https://github.com/inflatebot/SillyTavern-Mistral-Templates", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "PRs for KoboldCPP's chat adapters and KoboldAI Lite *have been merged* and are coming in their respective releases (probably the next time KoboldCPP updates -- it didn't make it for 1.75.1, but you could just grab 'em from the repo!)", "raw": "PRs for KoboldCPP's chat adapters and KoboldAI Lite *have been merged* and are coming in their respective releases (probably the next time KoboldCPP updates -- it didn't make it for 1.75.1, but you could just grab 'em from the repo!)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
!!SEE UPDATE BELOW!! I don't know who still needs to hear this, but if you're using Mistral Nemo-based models, you might have been using the wrong completions format. This is a signal boost from MarinaraSpaghetti's model card for NemoMix-Unleashed: https://huggingface.co/MarinaraSpaghetti/NemoMix-Unleashed-12B#instruct A lot of people have been working with a version of Nemo that's been reconfigured for ChatML, and while that works great, simply using the right format might be just as effective at correcting weirdness people in the AIRP scene sometimes have with Nemo. Huge ups to Marinara for pointing this out, and to the MistralAI team member who let her know. Update: A PR has been merged to SillyTavern Staging with new corrected templates! If you don't want to switch or wait, I put them up on GitHub: https://github.com/inflatebot/SillyTavern-Mistral-Templates PRs for KoboldCPP's chat adapters and KoboldAI Lite *have been merged* and are coming in their respective releases (probably the next time KoboldCPP updates -- it didn't make it for 1.75.1, but you could just grab 'em from the repo!)
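If you want to double-check what completions format a Nemo-based model actually expects, rather than trusting any frontend preset, one option is to render the tokenizer's own chat template. A small sketch, assuming the model repo ships a chat_template in its tokenizer config (most Mistral Nemo derivatives do); the model id below is just the one mentioned in the post:

```python
from transformers import AutoTokenizer

# Any Mistral Nemo derivative whose tokenizer config includes a chat_template
# works here; this id is only an example.
tok = AutoTokenizer.from_pretrained("MarinaraSpaghetti/NemoMix-Unleashed-12B")

messages = [
    {"role": "user", "content": "Hello!"},
    {"role": "assistant", "content": "Hi! How can I help?"},
    {"role": "user", "content": "Tell me a joke."},
]

# Render the exact prompt string the model was trained to see, including the
# tag that opens the next assistant turn.
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
```

Comparing that output against your frontend's instruct preset makes it obvious whether you are accidentally sending ChatML to a Mistral-format model.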
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6685d39f64da708c0f553c5d/d9EvSPFssc-jproPdAszF.png", "fullname": "Bot", "name": "inflatebot", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 43, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘", "users": [ "megoyaw3", "John6666", "xprizm", "smEEgles", "MarinaraSpaghetti", "Duttones", "djuna", "Nelathan", "Hmyia", "asdfsdfssddf", "thiera1" ], "count": 11 }, { "reaction": "๐Ÿ”ฅ", "users": [ "Duttones" ], "count": 1 } ]
2024-09-20T19:37:10.000Z
2024-09-22T00:12:01.069Z
[ { "avatarUrl": "/avatars/ab4dd498bbc0d5931f733b5a364fa765.svg", "fullname": "Vitor Lima", "name": "Duttones", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 3, "isFollowing": false } ]
/posts/inflatebot/902559059786872
3,030
1
774482264307494
[ { "type": "text", "value": "Is there any good multimodal pdf rag application, my task is to extract tables from unstructured pdfs and convert the same to xlsx file. Current python libraries are not capable of doing the same task with ease, imo vision models are capable of handling such task", "raw": "Is there any good multimodal pdf rag application, my task is to extract tables from unstructured pdfs and convert the same to xlsx file. Current python libraries are not capable of doing the same task with ease, imo vision models are capable of handling such task", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Is there any good multimodal PDF RAG application? My task is to extract tables from unstructured PDFs and convert them to an xlsx file. Current Python libraries are not capable of doing this with ease; IMO, vision models are capable of handling such a task.
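A baseline worth trying before reaching for a vision model is rule-based table extraction plus pandas for the xlsx export. This is a hedged sketch, not a full solution: it only works when the PDF has table structure that pdfplumber can detect, the file names are placeholders, and truly unstructured scans will still need a vision-model pass.

```python
import pdfplumber
import pandas as pd

def pdf_tables_to_xlsx(pdf_path: str, xlsx_path: str) -> None:
    # Collect every table pdfplumber can detect, then write one sheet per table.
    frames = []
    with pdfplumber.open(pdf_path) as pdf:
        for page_num, page in enumerate(pdf.pages, start=1):
            for t_num, table in enumerate(page.extract_tables(), start=1):
                # The first row is assumed to be the header; adjust if it is not.
                df = pd.DataFrame(table[1:], columns=table[0])
                frames.append((f"p{page_num}_t{t_num}", df))
    if not frames:
        print("No ruled tables detected; a vision-model pass may be needed.")
        return
    with pd.ExcelWriter(xlsx_path) as writer:
        for sheet_name, df in frames:
            df.to_excel(writer, sheet_name=sheet_name, index=False)

pdf_tables_to_xlsx("report.pdf", "tables.xlsx")
```

When this baseline fails, the usual next step is rendering each page to an image and asking a vision model to emit the table as CSV or JSON, then loading that into pandas the same way.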
{ "avatarUrl": "/avatars/33504742434a0c35019a227ca4cf1170.svg", "fullname": "Shreyas", "name": "Shreyas094", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 4, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-09-20T18:56:10.000Z
2024-09-20T18:56:10.034Z
[]
/posts/Shreyas094/774482264307494
550
0
134495504777376
[ { "type": "text", "value": "Generative 3D demos often produce vertex-colored meshes, without UVs or textures", "raw": "Generative 3D demos often produce vertex-colored meshes, without UVs or textures", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "so I made a minimal library that converts vertex-colored meshes to uv-mapped, textured meshes", "raw": "so I made a minimal library that converts vertex-colored meshes to uv-mapped, textured meshes", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "library: ", "raw": "library: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/dylanebert/InstantTexture", "href": "https://github.com/dylanebert/InstantTexture", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "demo: ", "raw": "demo: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/dylanebert/InstantTexture", "href": null, "resource": { "type": "space", "id": "dylanebert/InstantTexture", "discussionNum": null }, "url": "https://huggingface.co/spaces/dylanebert/InstantTexture", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Generative 3D demos often produce vertex-colored meshes, without UVs or textures so I made a minimal library that converts vertex-colored meshes to uv-mapped, textured meshes library: https://github.com/dylanebert/InstantTexture demo: https://huggingface.co/spaces/dylanebert/InstantTexture
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1672164046414-624b4a964056e2a6914a05c5.png", "fullname": "Dylan Ebert", "name": "dylanebert", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 1764, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/624b4a964056e2a6914a05c5/nZYwFdRi_GGHjuS7xvwlE.png" } ]
[]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "not-lain", "John6666", "SvCy", "fernandobold" ], "count": 4 } ]
2024-09-20T18:47:39.000Z
2024-09-20T18:47:39.524Z
[]
/posts/dylanebert/134495504777376
1,317
0
504479289231397
[ { "type": "text", "value": "A super good and fast image inpainting demo is here.", "raw": "A super good and fast image inpainting demo is here.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Its' super cool and realistic. ", "raw": "Its' super cool and realistic. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Demo by ", "raw": "Demo by ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@OzzyGT", "href": null, "resource": null, "url": null, "code": null, "user": "OzzyGT", "label": null, "lang": null }, { "type": "text", "value": " (Must try):", "raw": " (Must try):", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/OzzyGT/diffusers-fast-inpaint", "href": null, "resource": { "type": "space", "id": "OzzyGT/diffusers-fast-inpaint", "discussionNum": null }, "url": "https://huggingface.co/spaces/OzzyGT/diffusers-fast-inpaint", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
A super good and fast image inpainting demo is here. Its' super cool and realistic. Demo by @OzzyGT (Must try): https://huggingface.co/spaces/OzzyGT/diffusers-fast-inpaint
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6612aedf09f16e7347dfa7e1/bPYjBXCedY_1fSIPjoBTY.jpeg", "fullname": "Nishith Jain", "name": "KingNish", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1079, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6612aedf09f16e7347dfa7e1/VfTnkakTvp1bQsEE1K1oL.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6612aedf09f16e7347dfa7e1/rN0VJUHp19M-T4Bqc70J-.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6612aedf09f16e7347dfa7e1/qfbkOyuFVs2uHBs4N_F2w.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6612aedf09f16e7347dfa7e1/87MQNZ1C4DrQP69TTz3NT.webp" } ]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63df091910678851bb0cd0e0/FUXFt0C-rUFSppIAu5ZDN.png", "fullname": "Alvaro Somoza", "name": "OzzyGT", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 98 } ]
[ { "reaction": "๐Ÿ‘", "users": [ "Walmart-the-bag", "zolicsaki", "jerukperas", "SvCy", "den0620", "iamrobotbear" ], "count": 6 }, { "reaction": "๐Ÿ‘€", "users": [ "jerukperas", "John6666" ], "count": 2 }, { "reaction": "โค๏ธ", "users": [ "OzzyGT", "cctuan" ], "count": 2 } ]
2024-09-20T17:49:58.000Z
2024-09-24T09:46:21.548Z
[]
/posts/KingNish/504479289231397
3,081
1
942655800052171
[ { "type": "text", "value": "๐Ÿ›ธ I'm working on a pipeline for creating domain-specific ColPali fine-tuning datasets using a collection of UFO newsletters from the Internet Archive as a case study. ", "raw": "๐Ÿ›ธ I'm working on a pipeline for creating domain-specific ColPali fine-tuning datasets using a collection of UFO newsletters from the Internet Archive as a case study. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I will have a full notebook to share on Monday, but you can already take a look at the dataset here: ", "raw": "I will have a full notebook to share on Monday, but you can already take a look at the dataset here: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/davanstrien/ufo-ColPali", "href": null, "resource": { "type": "dataset", "id": "davanstrien/ufo-ColPali", "discussionNum": null }, "url": "https://huggingface.co/datasets/davanstrien/ufo-ColPali", "code": null, "user": null, "label": null, "lang": null } ]
๐Ÿ›ธ I'm working on a pipeline for creating domain-specific ColPali fine-tuning datasets using a collection of UFO newsletters from the Internet Archive as a case study. I will have a full notebook to share on Monday, but you can already take a look at the dataset here: https://huggingface.co/datasets/davanstrien/ufo-ColPali
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1627505688463-60107b385ac3e86b3ea4fc34.jpeg", "fullname": "Daniel van Strien", "name": "davanstrien", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 410, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/60107b385ac3e86b3ea4fc34/IRCkfj9J3Cn-SMzS6BZq1.png" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "osanseviero", "louisbrulenaudet" ], "count": 3 } ]
2024-09-20T17:11:41.000Z
2024-09-20T17:11:41.046Z
[]
/posts/davanstrien/942655800052171
448
0
530401285169001
[ { "type": "text", "value": "๐Ÿง  Stanford paper might be the key to OpenAI o1โ€™s performance: Whatโ€™s so effective about Chain of Thought? โ‡’ it unlocks radically different sequential tasks!", "raw": "๐Ÿง  Stanford paper might be the key to OpenAI o1โ€™s performance: Whatโ€™s so effective about Chain of Thought? โ‡’ it unlocks radically different sequential tasks!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ’ญย Reminder: A Chain of Thought (CoT) means that you instruct the model to โ€œthink step by stepโ€. Often itโ€™s literally just putting in the prompt โ€œletโ€™s think step by step.โ€", "raw": "๐Ÿ’ญย Reminder: A Chain of Thought (CoT) means that you instruct the model to โ€œthink step by stepโ€. Often itโ€™s literally just putting in the prompt โ€œletโ€™s think step by step.โ€", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿค”ย This method has been shown to be unreasonably effective to increase perf on benchmarks. However why it works so well remains unclear.", "raw": "๐Ÿค”ย This method has been shown to be unreasonably effective to increase perf on benchmarks. However why it works so well remains unclear.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Here's the scoop: Transformers are amazing at parallel processing, but they've always struggled with tasks that require sequential reasoning. ", "raw": "Here's the scoop: Transformers are amazing at parallel processing, but they've always struggled with tasks that require sequential reasoning. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โ›”๏ธ For instance if you ask them the result of 3^2^2^2^โ€ฆ, with 20 iterations, theyโ€™ll nearly always fail.", "raw": "โ›”๏ธ For instance if you ask them the result of 3^2^2^2^โ€ฆ, with 20 iterations, theyโ€™ll nearly always fail.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ’กย Indeed, researchers prove mathematically, by assimilating transformers networks to logical circuits, that effectively they cannot solve sequential tasks that require more than a certain threshold of sequences.", "raw": "๐Ÿ’กย Indeed, researchers prove mathematically, by assimilating transformers networks to logical circuits, that effectively they cannot solve sequential tasks that require more than a certain threshold of sequences.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "But CoT enables sequential reasoning:", "raw": "But CoT enables sequential reasoning:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- ๐Ÿงฑ Each step in the CoT corresponds to simulating one operation in a complex circuit.", "raw": "- ๐Ÿงฑ Each step in the CoT corresponds to simulating one operation in a complex circuit.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- ๐Ÿ”„ This allows the transformer to \"reset\" the depth of intermediate outputs, overcoming previous limitations.", "raw": "- ๐Ÿ”„ This allows the transformer to \"reset\" the depth of intermediate outputs, overcoming previous limitations.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- ๐Ÿš€ Thus, with CoT, constant-depth transformers can now solve ANY problem computable by polynomial-size circuits! (That's a huge class of problems in computer science.)", "raw": "- ๐Ÿš€ Thus, with CoT, constant-depth transformers can now solve ANY problem computable by polynomial-size circuits! (That's a huge class of problems in computer science.)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- ๐Ÿ”‘ Transformers can now handle tricky tasks like iterated squares (computing 3^2^2^2^2) composed permutations and evaluating circuits - stuff that requires serial computation.", "raw": "- ๐Ÿ”‘ Transformers can now handle tricky tasks like iterated squares (computing 3^2^2^2^2) composed permutations and evaluating circuits - stuff that requires serial computation.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- ๐Ÿ“Šย The improvement is especially dramatic for transformers with a limited depth. Empirical tests on four arithmetic problems showed massive accuracy gains with CoT on inherently serial tasks.", "raw": "- ๐Ÿ“Šย The improvement is especially dramatic for transformers with a limited depth. Empirical tests on four arithmetic problems showed massive accuracy gains with CoT on inherently serial tasks.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Main takeaway: Chain-of-thought isn't just a neat trick - it fundamentally expands what transformer models can do!", "raw": "Main takeaway: Chain-of-thought isn't just a neat trick - it fundamentally expands what transformer models can do!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Read the paper ๐Ÿ‘‰ย ", "raw": "Read the paper ๐Ÿ‘‰ย ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2402.12875", "href": null, "resource": { "type": "paper", "id": "2402.12875", "discussionNum": null }, "url": "https://huggingface.co/papers/2402.12875", "code": null, "user": null, "label": "Chain of Thought Empowers Transformers to Solve Inherently Serial\n Problems (2402.12875)", "lang": null } ]
๐Ÿง  Stanford paper might be the key to OpenAI o1โ€™s performance: Whatโ€™s so effective about Chain of Thought? โ‡’ it unlocks radically different sequential tasks! ๐Ÿ’ญย Reminder: A Chain of Thought (CoT) means that you instruct the model to โ€œthink step by stepโ€. Often itโ€™s literally just putting in the prompt โ€œletโ€™s think step by step.โ€ ๐Ÿค”ย This method has been shown to be unreasonably effective to increase perf on benchmarks. However why it works so well remains unclear. Here's the scoop: Transformers are amazing at parallel processing, but they've always struggled with tasks that require sequential reasoning. โ›”๏ธ For instance if you ask them the result of 3^2^2^2^โ€ฆ, with 20 iterations, theyโ€™ll nearly always fail. ๐Ÿ’กย Indeed, researchers prove mathematically, by assimilating transformers networks to logical circuits, that effectively they cannot solve sequential tasks that require more than a certain threshold of sequences. But CoT enables sequential reasoning: - ๐Ÿงฑ Each step in the CoT corresponds to simulating one operation in a complex circuit. - ๐Ÿ”„ This allows the transformer to "reset" the depth of intermediate outputs, overcoming previous limitations. - ๐Ÿš€ Thus, with CoT, constant-depth transformers can now solve ANY problem computable by polynomial-size circuits! (That's a huge class of problems in computer science.) - ๐Ÿ”‘ Transformers can now handle tricky tasks like iterated squares (computing 3^2^2^2^2) composed permutations and evaluating circuits - stuff that requires serial computation. - ๐Ÿ“Šย The improvement is especially dramatic for transformers with a limited depth. Empirical tests on four arithmetic problems showed massive accuracy gains with CoT on inherently serial tasks. Main takeaway: Chain-of-thought isn't just a neat trick - it fundamentally expands what transformer models can do! Read the paper ๐Ÿ‘‰ย https://huggingface.co/papers/2402.12875
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63d10d4e8eaa4831005e92b5/7p7-OmWM6PqqCs7ZStPGD.jpeg", "fullname": "Aymeric Roucher", "name": "m-ric", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 494, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/63d10d4e8eaa4831005e92b5/TLl-uJ9EBb5y9O0l5DyDA.png" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "k-young", "AtAndDev", "Smorty100", "Salvor", "louisbrulenaudet", "aitimer" ], "count": 7 }, { "reaction": "๐Ÿ”ฅ", "users": [ "nicolollo", "Smorty100", "osanseviero" ], "count": 3 } ]
2024-09-20T15:04:47.000Z
2024-09-20T15:04:47.303Z
[]
/posts/m-ric/530401285169001
1,066
0
945393662257901
[ { "type": "text", "value": "Just for the meme.", "raw": "Just for the meme.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "But the clear lesson I learnt from building these demos are, the more powerful the underlying base model is, the closer you will get to GPT4o1. CoT is nothing more than simply inducing the latent reasoning capability from the model.", "raw": "But the clear lesson I learnt from building these demos are, the more powerful the underlying base model is, the closer you will get to GPT4o1. CoT is nothing more than simply inducing the latent reasoning capability from the model.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/kz919/GPT4-O1-Proximas", "href": null, "resource": { "type": "space", "id": "kz919/GPT4-O1-Proximas", "discussionNum": null }, "url": "https://huggingface.co/spaces/kz919/GPT4-O1-Proximas", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Just for the meme. But the clear lesson I learnt from building these demos are, the more powerful the underlying base model is, the closer you will get to GPT4o1. CoT is nothing more than simply inducing the latent reasoning capability from the model. https://huggingface.co/spaces/kz919/GPT4-O1-Proximas
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62140dcdcf7928035e8135ad/FTiirwS_L6IaLHmHwIo2g.png", "fullname": "Kaizhao Liang", "name": "kz919", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 34, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/62140dcdcf7928035e8135ad/dLYbZiX5j5Ma-FpCXYE00.png" } ]
[]
[ { "reaction": "๐Ÿš€", "users": [ "zolicsaki", "John6666", "kz919", "KillerShoaib", "juno12", "osanseviero" ], "count": 6 }, { "reaction": "๐Ÿ”ฅ", "users": [ "roger-temp", "kz919" ], "count": 2 }, { "reaction": "๐Ÿ˜Ž", "users": [ "kz919" ], "count": 1 } ]
2024-09-20T14:53:13.000Z
2024-09-20T14:53:13.560Z
[]
/posts/kz919/945393662257901
1,257
0
115598151470774
[ { "type": "text", "value": "Take a look at distilabel's last blog post to see how to leverage FinePersonas to create AI users for a social network, inspired by SocialAI's app: ", "raw": "Take a look at distilabel's last blog post to see how to leverage FinePersonas to create AI users for a social network, inspired by SocialAI's app: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://x.com/michaelsayman/status/1835841675584811239", "href": "https://x.com/michaelsayman/status/1835841675584811239", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Link: ", "raw": "- Link: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://distilabel.argilla.io/dev/sections/pipeline_samples/examples/fine_personas_social_network/", "href": "https://distilabel.argilla.io/dev/sections/pipeline_samples/examples/fine_personas_social_network/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Sample dataset: ", "raw": "- Sample dataset: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/plaguss/FinePersonas-SocialAI-test", "href": null, "resource": { "type": "dataset", "id": "plaguss/FinePersonas-SocialAI-test", "discussionNum": null }, "url": "https://huggingface.co/datasets/plaguss/FinePersonas-SocialAI-test", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- FinePersonas: ", "raw": "- FinePersonas: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/argilla/FinePersonas-v0.1", "href": null, "resource": { "type": "dataset", "id": "argilla/FinePersonas-v0.1", "discussionNum": null }, "url": "https://huggingface.co/datasets/argilla/FinePersonas-v0.1", "code": null, "user": null, "label": null, "lang": null } ]
Take a look at distilabel's last blog post to see how to leverage FinePersonas to create AI users for a social network, inspired by SocialAI's app: https://x.com/michaelsayman/status/1835841675584811239 - Link: https://distilabel.argilla.io/dev/sections/pipeline_samples/examples/fine_personas_social_network/ - Sample dataset: https://huggingface.co/datasets/plaguss/FinePersonas-SocialAI-test - FinePersonas: https://huggingface.co/datasets/argilla/FinePersonas-v0.1
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6435d564a4bd75c62cc03701/7P2G_wVNB6MISp2Phh427.jpeg", "fullname": "Agustรญn Piqueres Lajarรญn", "name": "plaguss", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 34, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-09-20T14:29:05.000Z
2024-09-20T14:29:05.513Z
[]
/posts/plaguss/115598151470774
321
0
517936806772862
[ { "type": "text", "value": "Wikimedia Enterprise just dropped full English & French Wikipedia on Hugging Face as structured JSON ๐Ÿคฏ", "raw": "Wikimedia Enterprise just dropped full English & French Wikipedia on Hugging Face as structured JSON ๐Ÿคฏ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Key points:", "raw": "Key points:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1. Parsed articles ready for machine learning pipelines", "raw": "1. Parsed articles ready for machine learning pipelines", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "2. Perfect for AI model development - from pre-training to RAG", "raw": "2. Perfect for AI model development - from pre-training to RAG", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "3. Includes metadata, Wikidata links, and content scores", "raw": "3. Includes metadata, Wikidata links, and content scores", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "4. Licensed under GFDL and CC BY-SA 4.0 (some content may have additional terms)", "raw": "4. Licensed under GFDL and CC BY-SA 4.0 (some content may have additional terms)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I've been testing it, and it's a game-changer. The structured format is like a supercharged version of raw Wiki dumps.", "raw": "I've been testing it, and it's a game-changer. The structured format is like a supercharged version of raw Wiki dumps.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Thoughts on potential applications? I'm particularly interested in how this could improve AI language models' factual accuracy. Drop your ideas in the comments!", "raw": "Thoughts on potential applications? I'm particularly interested in how this could improve AI language models' factual accuracy. Drop your ideas in the comments!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Dataset: ", "raw": "Dataset: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/wikimedia/structured-wikipedia", "href": null, "resource": { "type": "dataset", "id": "wikimedia/structured-wikipedia", "discussionNum": null }, "url": "https://huggingface.co/datasets/wikimedia/structured-wikipedia", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "#AI #OpenData #Wikipedia #MachineLearning", "raw": "#AI #OpenData #Wikipedia #MachineLearning", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Wikimedia Enterprise just dropped full English & French Wikipedia on Hugging Face as structured JSON ๐Ÿคฏ Key points: 1. Parsed articles ready for machine learning pipelines 2. Perfect for AI model development - from pre-training to RAG 3. Includes metadata, Wikidata links, and content scores 4. Licensed under GFDL and CC BY-SA 4.0 (some content may have additional terms) I've been testing it, and it's a game-changer. The structured format is like a supercharged version of raw Wiki dumps. Thoughts on potential applications? I'm particularly interested in how this could improve AI language models' factual accuracy. Drop your ideas in the comments! Dataset: https://huggingface.co/datasets/wikimedia/structured-wikipedia #AI #OpenData #Wikipedia #MachineLearning
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/647f36a8454af0237bd49574/jshkqBUTY-GZL8As8y6Aq.jpeg", "fullname": "Florent Daudens", "name": "fdaudens", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 384, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-09-20T14:18:29.000Z
2024-09-23T08:02:30.724Z
[ { "avatarUrl": "/avatars/5cbfa6cbde933503bbc3577cf713e7b5.svg", "fullname": "Friedrich Marty", "name": "Smorty100", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false } ]
/posts/fdaudens/517936806772862
320
1
546597993599669
[ { "type": "text", "value": "I've just shipped the Sentence Transformers v3.1.1 patch release, fixing the hard negatives mining utility for some models. This utility is extremely useful to get more performance out of your embedding training data.", "raw": "I've just shipped the Sentence Transformers v3.1.1 patch release, fixing the hard negatives mining utility for some models. This utility is extremely useful to get more performance out of your embedding training data.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โ› Hard negatives are texts that are rather similar to some anchor text (e.g. a query), but are not the correct match. They're difficult for a model to distinguish from the correct answer, often resulting in a stronger model after training.", "raw": "โ› Hard negatives are texts that are rather similar to some anchor text (e.g. a query), but are not the correct match. They're difficult for a model to distinguish from the correct answer, often resulting in a stronger model after training.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "inline_code", "value": null, "raw": "`mine_hard_negatives`", "href": null, "resource": null, "url": null, "code": "mine_hard_negatives", "user": null, "label": null, "lang": null }, { "type": "text", "value": " docs: ", "raw": " docs: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://sbert.net/docs/package_reference/util.html#sentence_transformers.util.mine_hard_negatives", "href": "https://sbert.net/docs/package_reference/util.html#sentence_transformers.util.mine_hard_negatives", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ”“ Beyond that, this release removes the numpy<2 restriction from v3.1.0. This was previously required for Windows as not all third-party libraries were updated to support numpy v2. With Sentence Transformers, you can now choose v1 or v2 of numpy.", "raw": "๐Ÿ”“ Beyond that, this release removes the numpy<2 restriction from v3.1.0. This was previously required for Windows as not all third-party libraries were updated to support numpy v2. With Sentence Transformers, you can now choose v1 or v2 of numpy.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Check out the full release notes here: ", "raw": "Check out the full release notes here: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/UKPLab/sentence-transformers/releases/tag/v3.1.1", "href": "https://github.com/UKPLab/sentence-transformers/releases/tag/v3.1.1", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I'm looking forward to releasing v3.2, I have some exciting things planned ๐Ÿš€", "raw": "I'm looking forward to releasing v3.2, I have some exciting things planned ๐Ÿš€", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
I've just shipped the Sentence Transformers v3.1.1 patch release, fixing the hard negatives mining utility for some models. This utility is extremely useful to get more performance out of your embedding training data. โ› Hard negatives are texts that are rather similar to some anchor text (e.g. a query), but are not the correct match. They're difficult for a model to distinguish from the correct answer, often resulting in a stronger model after training. `mine_hard_negatives` docs: https://sbert.net/docs/package_reference/util.html#sentence_transformers.util.mine_hard_negatives ๐Ÿ”“ Beyond that, this release removes the numpy<2 restriction from v3.1.0. This was previously required for Windows as not all third-party libraries were updated to support numpy v2. With Sentence Transformers, you can now choose v1 or v2 of numpy. Check out the full release notes here: https://github.com/UKPLab/sentence-transformers/releases/tag/v3.1.1 I'm looking forward to releasing v3.2, I have some exciting things planned ๐Ÿš€
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6317233cc92fd6fee317e030/cJHSvvimr1kqgQfHOjO5n.png", "fullname": "Tom Aarsen", "name": "tomaarsen", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 1060, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6317233cc92fd6fee317e030/KgXMH6y7FMdwrtLUOwgCI.png" } ]
[]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "cschroeder", "not-lain", "inflatebot", "jaisanrobert", "louisbrulenaudet" ], "count": 5 }, { "reaction": "๐Ÿ‘", "users": [ "sugatoray", "not-lain" ], "count": 2 }, { "reaction": "๐Ÿคฏ", "users": [ "inflatebot", "John6666" ], "count": 2 } ]
2024-09-20T14:15:36.000Z
2024-09-20T14:15:36.173Z
[]
/posts/tomaarsen/546597993599669
1,981
0
133469785217479
[ { "type": "text", "value": "The Double-Edged Sword of AI in Education: Navigating Ethical Challenges, Cognitive Development, and the Nature of Consciousness in the Age of Generative Technologies", "raw": "The Double-Edged Sword of AI in Education: Navigating Ethical Challenges, Cognitive Development, and the Nature of Consciousness in the Age of Generative Technologies", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://empereur-pirate.medium.com/the-double-edged-sword-of-ai-in-education-navigating-ethical-challenges-cognitive-development-2e71d5aca1d1", "href": "https://empereur-pirate.medium.com/the-double-edged-sword-of-ai-in-education-navigating-ethical-challenges-cognitive-development-2e71d5aca1d1", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The article \"The Double-Edged Sword of AI in Education\" delves into the ethical and psychological challenges of integrating AI into children's education. It highlights the risks posed by conversational agents designed for adults, which can expose children to inappropriate content and disrupt their cognitive development. Through role-playing games and linguistic interactions, these AI tools may allow children to bypass parental and educational boundaries, hindering their ability to learn independently.", "raw": "The article \"The Double-Edged Sword of AI in Education\" delves into the ethical and psychological challenges of integrating AI into children's education. It highlights the risks posed by conversational agents designed for adults, which can expose children to inappropriate content and disrupt their cognitive development. Through role-playing games and linguistic interactions, these AI tools may allow children to bypass parental and educational boundaries, hindering their ability to learn independently.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The article also emphasizes that AI can contribute to mental health issues by exposing young users to complex or unsuitable content, undermining educational and parental authority. Generative AI tools may foster psychological disorders by promoting access to imaginary realities that are not developmentally appropriate for children.", "raw": "The article also emphasizes that AI can contribute to mental health issues by exposing young users to complex or unsuitable content, undermining educational and parental authority. Generative AI tools may foster psychological disorders by promoting access to imaginary realities that are not developmentally appropriate for children.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Additionally, the article explores broader implications, particularly the confusion between simulated intelligence and true consciousness. While language models can mimic cognitive processes like \"situational awareness,\" they remain tools for processing information without actual emotional or mental consciousness. It's essential to differentiate this simulated situational efficiency from human consciousness, which involves complex emotional and psychic integration.", "raw": "Additionally, the article explores broader implications, particularly the confusion between simulated intelligence and true consciousness. While language models can mimic cognitive processes like \"situational awareness,\" they remain tools for processing information without actual emotional or mental consciousness. It's essential to differentiate this simulated situational efficiency from human consciousness, which involves complex emotional and psychic integration.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Ultimately, the article calls for strict regulation and the creation of AI models specifically tailored to children, accounting for their cognitive and emotional maturity. As AI technologies advance, deeper reflection on the nature of intelligence and consciousness is necessary to protect children and ensure ethical, healthy learning environments.", "raw": "Ultimately, the article calls for strict regulation and the creation of AI models specifically tailored to children, accounting for their cognitive and emotional maturity. As AI technologies advance, deeper reflection on the nature of intelligence and consciousness is necessary to protect children and ensure ethical, healthy learning environments.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
The Double-Edged Sword of AI in Education: Navigating Ethical Challenges, Cognitive Development, and the Nature of Consciousness in the Age of Generative Technologies https://empereur-pirate.medium.com/the-double-edged-sword-of-ai-in-education-navigating-ethical-challenges-cognitive-development-2e71d5aca1d1 The article "The Double-Edged Sword of AI in Education" delves into the ethical and psychological challenges of integrating AI into children's education. It highlights the risks posed by conversational agents designed for adults, which can expose children to inappropriate content and disrupt their cognitive development. Through role-playing games and linguistic interactions, these AI tools may allow children to bypass parental and educational boundaries, hindering their ability to learn independently. The article also emphasizes that AI can contribute to mental health issues by exposing young users to complex or unsuitable content, undermining educational and parental authority. Generative AI tools may foster psychological disorders by promoting access to imaginary realities that are not developmentally appropriate for children. Additionally, the article explores broader implications, particularly the confusion between simulated intelligence and true consciousness. While language models can mimic cognitive processes like "situational awareness," they remain tools for processing information without actual emotional or mental consciousness. It's essential to differentiate this simulated situational efficiency from human consciousness, which involves complex emotional and psychic integration. Ultimately, the article calls for strict regulation and the creation of AI models specifically tailored to children, accounting for their cognitive and emotional maturity. As AI technologies advance, deeper reflection on the nature of intelligence and consciousness is necessary to protect children and ensure ethical, healthy learning environments.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1678038324479-noauth.jpeg", "fullname": "Empereur Pirate", "name": "Empereur-Pirate", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 7, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿš€", "users": [ "Empereur-Pirate" ], "count": 1 } ]
2024-09-20T13:42:07.000Z
2024-09-20T13:42:52.363Z
[]
/posts/Empereur-Pirate/133469785217479
244
0
139746681909382
[ { "type": "text", "value": "Qwen2.5-72B + Flux-dev + FinePersonas = Grounded Structured Character Generator", "raw": "Qwen2.5-72B + Flux-dev + FinePersonas = Grounded Structured Character Generator", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Check out my latest projects that uses ", "raw": "Check out my latest projects that uses ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/Qwen/Qwen2.5-72B-Instruct", "href": null, "resource": { "type": "model", "id": "Qwen/Qwen2.5-72B-Instruct", "discussionNum": null }, "url": "https://huggingface.co/Qwen/Qwen2.5-72B-Instruct", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " , ", "raw": " , ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/black-forest-labs/FLUX.1-dev", "href": null, "resource": { "type": "model", "id": "black-forest-labs/FLUX.1-dev", "discussionNum": null }, "url": "https://huggingface.co/black-forest-labs/FLUX.1-dev", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " , and ", "raw": " , and ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/MohamedRashad/FinePersonas-Lite", "href": null, "resource": { "type": "dataset", "id": "MohamedRashad/FinePersonas-Lite", "discussionNum": null }, "url": "https://huggingface.co/datasets/MohamedRashad/FinePersonas-Lite", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " to generate different characters in a world of your description.", "raw": " to generate different characters in a world of your description.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Try Here: ", "raw": "Try Here: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/MohamedRashad/Character-Generator", "href": null, "resource": { "type": "space", "id": "MohamedRashad/Character-Generator", "discussionNum": null }, "url": "https://huggingface.co/spaces/MohamedRashad/Character-Generator", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ๐Ÿค—", "raw": " ๐Ÿค—", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Qwen2.5-72B + Flux-dev + FinePersonas = Grounded Structured Character Generator Check out my latest projects that uses https://huggingface.co/Qwen/Qwen2.5-72B-Instruct , https://huggingface.co/black-forest-labs/FLUX.1-dev , and https://huggingface.co/datasets/MohamedRashad/FinePersonas-Lite to generate different characters in a world of your description. Try Here: https://huggingface.co/spaces/MohamedRashad/Character-Generator ๐Ÿค—
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1628885133347-6116d0584ef9fdfbf45dc4d9.jpeg", "fullname": "Mohamed Rashad", "name": "MohamedRashad", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 141, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6116d0584ef9fdfbf45dc4d9/ZRJ8X5POhtN60HL_3udCB.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6116d0584ef9fdfbf45dc4d9/5N0xFRwFvuX-Fz2X-RcHK.png" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "MohamedRashad" ], "count": 2 }, { "reaction": "๐Ÿ‘", "users": [ "whitebill", "Krotos" ], "count": 2 } ]
2024-09-20T13:04:30.000Z
2024-09-20T13:06:40.953Z
[]
/posts/MohamedRashad/139746681909382
963
0
407961305075360
[ { "type": "text", "value": "Anthropic just released a chunk improvement technique that vastly improves RAG performance! ๐Ÿ”ฅ", "raw": "Anthropic just released a chunk improvement technique that vastly improves RAG performance! ๐Ÿ”ฅ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Crash reminder: Retrieval Augmented Generation (RAG) is a widely-used technique for improving your LLM chatbot's answers to user questions.", "raw": "Crash reminder: Retrieval Augmented Generation (RAG) is a widely-used technique for improving your LLM chatbot's answers to user questions.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "It goes like this: instead of generating an LLM answer straight away, it just adds a previous step called Retrieval, that retrieves relevant documents from your knowledge base through semantic search, and just appends the top K documents to the prompt. โžก๏ธ As a result, the LLM answer is grounded in context.", "raw": "It goes like this: instead of generating an LLM answer straight away, it just adds a previous step called Retrieval, that retrieves relevant documents from your knowledge base through semantic search, and just appends the top K documents to the prompt. โžก๏ธ As a result, the LLM answer is grounded in context.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โ›”๏ธ The difficulty with this retrieval step is that when you split your documents into chunks that will be retrieved, you lose context. So importance chunks could be missed.", "raw": "โ›”๏ธ The difficulty with this retrieval step is that when you split your documents into chunks that will be retrieved, you lose context. So importance chunks could be missed.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ’ก Anthropic's just released blog post shows that you can add some context to each chunk, with one LLM call. Then you embed the original chunk + a bit of added context, so that the embedding is much more representative of the document in its context!", "raw": "๐Ÿ’ก Anthropic's just released blog post shows that you can add some context to each chunk, with one LLM call. Then you embed the original chunk + a bit of added context, so that the embedding is much more representative of the document in its context!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿค” Isn't that crazy expensive? Well it would have been before, but not so much anymore with their new Prompt caching feature that makes duplicating thousands of requests with the same prompt much less expensive. They give an indicative price tag of only $1.02 per million chunks processed!", "raw": "๐Ÿค” Isn't that crazy expensive? Well it would have been before, but not so much anymore with their new Prompt caching feature that makes duplicating thousands of requests with the same prompt much less expensive. They give an indicative price tag of only $1.02 per million chunks processed!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โœ… And this vastly improves performance on their benchmark!", "raw": "โœ… And this vastly improves performance on their benchmark!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Read their blog post ๐Ÿ‘‰ ", "raw": "Read their blog post ๐Ÿ‘‰ ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.anthropic.com/news/contextual-retrieval", "href": "https://www.anthropic.com/news/contextual-retrieval", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Anthropic just released a chunk improvement technique that vastly improves RAG performance! ๐Ÿ”ฅ Crash reminder: Retrieval Augmented Generation (RAG) is a widely-used technique for improving your LLM chatbot's answers to user questions. It goes like this: instead of generating an LLM answer straight away, it adds a preliminary step called Retrieval, which retrieves relevant documents from your knowledge base through semantic search and appends the top K documents to the prompt. โžก๏ธ As a result, the LLM answer is grounded in context. โ›”๏ธ The difficulty with this retrieval step is that when you split your documents into chunks that will be retrieved, you lose context. So important chunks could be missed. ๐Ÿ’ก Anthropic's newly released blog post shows that you can add some context to each chunk with one LLM call. Then you embed the original chunk + a bit of added context, so that the embedding is much more representative of the document in its context! ๐Ÿค” Isn't that crazy expensive? Well, it would have been before, but not so much anymore with their new Prompt caching feature, which makes duplicating thousands of requests with the same prompt much less expensive. They give an indicative price tag of only $1.02 per million chunks processed! โœ… And this vastly improves performance on their benchmark! Read their blog post ๐Ÿ‘‰ https://www.anthropic.com/news/contextual-retrieval
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63d10d4e8eaa4831005e92b5/7p7-OmWM6PqqCs7ZStPGD.jpeg", "fullname": "Aymeric Roucher", "name": "m-ric", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 494, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/63d10d4e8eaa4831005e92b5/oz94TbsyNRw__nmJxsKGX.png" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "wsuff" ], "count": 2 } ]
2024-09-20T12:54:23.000Z
2024-09-20T12:54:23.885Z
[]
/posts/m-ric/407961305075360
353
0
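The contextual-chunk idea described in the post above is straightforward to prototype. Below is a minimal, hypothetical sketch assuming the `anthropic` Python SDK; the model name, prompt wording, and helper name are illustrative and not taken from Anthropic's blog post, and in practice you would also enable their prompt caching on the repeated document prefix to reach the quoted ~$1.02 per million chunks.

```python
# Hypothetical sketch of contextual chunk augmentation (not Anthropic's reference code).
# Assumes: `pip install anthropic` and ANTHROPIC_API_KEY set; model name and prompt are illustrative.
import anthropic

client = anthropic.Anthropic()

def contextualize_chunk(document: str, chunk: str) -> str:
    """Ask the LLM for a short blurb situating `chunk` inside `document`, then prepend it."""
    response = client.messages.create(
        model="claude-3-haiku-20240307",  # any cheap model works for this preprocessing step
        max_tokens=100,
        messages=[{
            "role": "user",
            "content": (
                f"<document>\n{document}\n</document>\n"
                f"Here is a chunk from that document:\n<chunk>\n{chunk}\n</chunk>\n"
                "Give a short context (1-2 sentences) situating this chunk within the overall "
                "document, to improve search retrieval of the chunk. Answer only with the context."
            ),
        }],
    )
    context = response.content[0].text.strip()
    # Embed (context + chunk) instead of the bare chunk when building the vector index.
    return context + "\n" + chunk
```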
337858800657734
[ { "type": "text", "value": "Explore FinePersonas, visually with Argilla and ", "raw": "Explore FinePersonas, visually with Argilla and ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/black-forest-labs/FLUX.1-schnell", "href": null, "resource": { "type": "model", "id": "black-forest-labs/FLUX.1-schnell", "discussionNum": null }, "url": "https://huggingface.co/black-forest-labs/FLUX.1-schnell", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Excited to share this space where the community can explore a tiny subset of FinePersonas", "raw": "Excited to share this space where the community can explore a tiny subset of FinePersonas", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/argilla/finepersonas", "href": null, "resource": { "type": "space", "id": "argilla/finepersonas", "discussionNum": null }, "url": "https://huggingface.co/spaces/argilla/finepersonas", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Dataset built with distilabel and Free Serveless endpoints", "raw": "Dataset built with distilabel and Free Serveless endpoints", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This is just a first step towards more interesting experiments with FinePersonas, for example can we use it to assess biases in text2image models?", "raw": "This is just a first step towards more interesting experiments with FinePersonas, for example can we use it to assess biases in text2image models?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": 
null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "If you have ideas I'd love to hear them in the comments!", "raw": "If you have ideas I'd love to hear them in the comments!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Explore FinePersonas visually with Argilla and https://huggingface.co/black-forest-labs/FLUX.1-schnell Excited to share this space where the community can explore a tiny subset of FinePersonas https://huggingface.co/spaces/argilla/finepersonas Dataset built with distilabel and Free Serverless endpoints This is just a first step towards more interesting experiments with FinePersonas; for example, can we use it to assess biases in text2image models? If you have ideas, I'd love to hear them in the comments!
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/60420dccc15e823a685f2b03/Dn7QTyy9SZ7jKN6xpufVD.png", "fullname": "Daniel Vila", "name": "dvilasuero", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 231, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/60420dccc15e823a685f2b03/XgIbRyy4_6O0skCDjqXwL.png" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "alielfilali01" ], "count": 2 }, { "reaction": "๐Ÿ”ฅ", "users": [ "alielfilali01" ], "count": 1 }, { "reaction": "โค๏ธ", "users": [ "alielfilali01" ], "count": 1 } ]
2024-09-20T12:35:54.000Z
2024-09-20T12:35:54.322Z
[]
/posts/dvilasuero/337858800657734
395
0
570076486531546
[ { "type": "text", "value": "Less than two days ago Kyutai Labs open sourced Moshi - an ~7.6B on-device Speech to Speech foundation model and Mimi - SoTA streaming speech codec! ๐Ÿ”ฅ", "raw": "Less than two days ago Kyutai Labs open sourced Moshi - an ~7.6B on-device Speech to Speech foundation model and Mimi - SoTA streaming speech codec! ๐Ÿ”ฅ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The release includes:", "raw": "The release includes:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1. Moshiko & Moshika - Moshi finetuned on synthetic data (CC-BY license) (", "raw": "1. Moshiko & Moshika - Moshi finetuned on synthetic data (CC-BY license) (", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/kyutai/moshi-v01-release-66eaeaf3302bef6bd9ad7acd", "href": null, "resource": { "type": "collection", "id": "kyutai/moshi-v01-release-66eaeaf3302bef6bd9ad7acd", "discussionNum": null }, "url": "https://huggingface.co/collections/kyutai/moshi-v01-release-66eaeaf3302bef6bd9ad7acd", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": ")", "raw": ")", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "2. Mimi - Streaiming Audio Codec, processes 24 kHz audio, down to a 12.5 Hz representation with a bandwidth of 1.1 kbps (CC-BY license) (", "raw": "2. Mimi - Streaiming Audio Codec, processes 24 kHz audio, down to a 12.5 Hz representation with a bandwidth of 1.1 kbps (CC-BY license) (", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/kyutai/mimi", "href": null, "resource": { "type": "model", "id": "kyutai/mimi", "discussionNum": null }, "url": "https://huggingface.co/kyutai/mimi", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": ")", "raw": ")", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "3. Model checkpoints & Inference codebase written in Rust (Candle), PyTorch & MLX (Apache license) (", "raw": "3. 
Model checkpoints & Inference codebase written in Rust (Candle), PyTorch & MLX (Apache license) (", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/kyutai-labs/moshi", "href": "https://github.com/kyutai-labs/moshi", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": ")", "raw": ")", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "How does Moshi work?", "raw": "How does Moshi work?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1. Moshi processes two audio streams: one for itself and one for the user, with the user's stream coming from audio input and Moshi's stream generated by the model.", "raw": "1. Moshi processes two audio streams: one for itself and one for the user, with the user's stream coming from audio input and Moshi's stream generated by the model.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "2. Along with these audio streams, Moshi predicts text tokens for its speech, enhancing its generation quality.", "raw": "2. Along with these audio streams, Moshi predicts text tokens for its speech, enhancing its generation quality.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "3. The model uses a small Depth Transformer for codebook dependencies and a large 7B parameter Temporal Transformer for temporal dependencies.", "raw": "3. 
The model uses a small Depth Transformer for codebook dependencies and a large 7B parameter Temporal Transformer for temporal dependencies.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "4. The theoretical latency is 160ms, with a practical latency of around 200ms on an L4 GPU.", "raw": "4. The theoretical latency is 160ms, with a practical latency of around 200ms on an L4 GPU.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Model size & inference:", "raw": "Model size & inference:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Moshiko/ka are 7.69B param models", "raw": "Moshiko/ka are 7.69B param models", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "bf16 ~16GB VRAM", "raw": "bf16 ~16GB VRAM", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "8-bit ~8GB VRAM", "raw": "8-bit ~8GB VRAM", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "4-bit ~4GB VRAM", "raw": "4-bit ~4GB VRAM", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "You can run inference via Candle ๐Ÿฆ€, PyTorch and MLX - based on your hardware.", "raw": "You can run inference via Candle ๐Ÿฆ€, PyTorch and MLX - based on your 
hardware.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The Kyutai team, ", "raw": "The Kyutai team, ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@adefossez", "href": null, "resource": null, "url": null, "code": null, "user": "adefossez", "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@lmz", "href": null, "resource": null, "url": null, "code": null, "user": "lmz", "label": null, "lang": null }, { "type": "text", "value": " and team are cracked AF, they're bringing some serious firepower to the open source/ science AI scene, looking forward to what's next! ๐Ÿ", "raw": " and team are cracked AF, they're bringing some serious firepower to the open source/ science AI scene, looking forward to what's next! ๐Ÿ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Less than two days ago Kyutai Labs open sourced Moshi - a ~7.6B on-device Speech to Speech foundation model - and Mimi - a SoTA streaming speech codec! ๐Ÿ”ฅ The release includes: 1. Moshiko & Moshika - Moshi finetuned on synthetic data (CC-BY license) (https://huggingface.co/collections/kyutai/moshi-v01-release-66eaeaf3302bef6bd9ad7acd) 2. Mimi - Streaming Audio Codec that processes 24 kHz audio down to a 12.5 Hz representation with a bandwidth of 1.1 kbps (CC-BY license) (https://huggingface.co/kyutai/mimi) 3. Model checkpoints & Inference codebase written in Rust (Candle), PyTorch & MLX (Apache license) (https://github.com/kyutai-labs/moshi) How does Moshi work? 1. Moshi processes two audio streams: one for itself and one for the user, with the user's stream coming from audio input and Moshi's stream generated by the model. 2. Along with these audio streams, Moshi predicts text tokens for its speech, enhancing its generation quality. 3. The model uses a small Depth Transformer for codebook dependencies and a large 7B parameter Temporal Transformer for temporal dependencies. 4. The theoretical latency is 160ms, with a practical latency of around 200ms on an L4 GPU. Model size & inference: Moshiko/ka are 7.69B param models bf16 ~16GB VRAM 8-bit ~8GB VRAM 4-bit ~4GB VRAM You can run inference via Candle ๐Ÿฆ€, PyTorch and MLX - based on your hardware. The Kyutai team, @adefossez @lmz and team are cracked AF, they're bringing some serious firepower to the open source / science AI scene, looking forward to what's next! ๐Ÿ
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1655385361868-61b85ce86eb1f2c5e6233736.jpeg", "fullname": "Vaibhav Srivastav", "name": "reach-vb", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 460, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/61b85ce86eb1f2c5e6233736/4AvxTcEapDvezIdUJA5mi.png" } ]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1666708948380-noauth.jpeg", "fullname": "Alexandre Dรฉfossez", "name": "adefossez", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 18 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6355a3c1805be5a8f30fea49/ONMEctCWAeAgF2eZ307si.jpeg", "fullname": "Laurent Mazare", "name": "lmz", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 75 } ]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "lmz", "AtAndDev", "damerajee", "den0620", "adorkin", "Forbu14", "abdullah", "Nelathan", "osanseviero", "Dunateo", "bitsoko" ], "count": 11 }, { "reaction": "๐Ÿง ", "users": [ "MohamedRashad", "John6666", "fdaudens", "AtAndDev", "damerajee", "bitsoko" ], "count": 6 } ]
2024-09-20T12:04:17.000Z
2024-09-20T17:59:13.540Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6487239cca30096ea9f52115/HMte9wjKJgfcxsO-5vb_Q.jpeg", "fullname": "dame rajee", "name": "damerajee", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 11, "isFollowing": false } ]
/posts/reach-vb/570076486531546
2,803
1
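For readers who want to poke at the Mimi codec mentioned above, here is a rough sketch assuming its transformers integration (>= 4.45) follows the same feature-extractor plus encode/decode interface as other neural audio codecs such as EnCodec; the output attribute names are assumptions, so check the kyutai/mimi model card for the exact signatures.

```python
# Rough sketch, assuming Mimi's transformers integration mirrors EnCodec-style codecs;
# the output attribute names (audio_codes / audio_values) should be verified against
# the kyutai/mimi model card before relying on this.
import numpy as np
import torch
from transformers import AutoFeatureExtractor, MimiModel

feature_extractor = AutoFeatureExtractor.from_pretrained("kyutai/mimi")
model = MimiModel.from_pretrained("kyutai/mimi")

# One second of silence at Mimi's expected 24 kHz rate, as a stand-in waveform.
waveform = np.zeros(feature_extractor.sampling_rate, dtype=np.float32)
inputs = feature_extractor(waveform, sampling_rate=feature_extractor.sampling_rate, return_tensors="pt")

with torch.no_grad():
    encoded = model.encode(inputs["input_values"])   # discrete codes at ~12.5 Hz
    decoded = model.decode(encoded.audio_codes)      # reconstructed 24 kHz waveform

print(encoded.audio_codes.shape, decoded.audio_values.shape)
```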
962669244664239
[ { "type": "text", "value": "Check out the new Structured #Wikipedia dataset by Wikimedia Enterprise: abstract, infobox, structured sections, main image,... ", "raw": "Check out the new Structured #Wikipedia dataset by Wikimedia Enterprise: abstract, infobox, structured sections, main image,... ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Currently in early beta (English & French). Explore it and give feedback: ", "raw": "Currently in early beta (English & French). Explore it and give feedback: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/wikimedia/structured-wikipedia", "href": null, "resource": { "type": "dataset", "id": "wikimedia/structured-wikipedia", "discussionNum": null }, "url": "https://huggingface.co/datasets/wikimedia/structured-wikipedia", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "More info: ", "raw": "More info: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://enterprise.wikimedia.com/blog/hugging-face-dataset/", "href": "https://enterprise.wikimedia.com/blog/hugging-face-dataset/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@sdelbecque", "href": null, "resource": null, "url": null, "code": null, "user": "sdelbecque", "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@resquito-wmf", "href": null, "resource": null, "url": null, "code": null, "user": "resquito-wmf", "label": null, "lang": null } ]
Check out the new Structured #Wikipedia dataset by Wikimedia Enterprise: abstract, infobox, structured sections, main image,... Currently in early beta (English & French). Explore it and give feedback: https://huggingface.co/datasets/wikimedia/structured-wikipedia More info: https://enterprise.wikimedia.com/blog/hugging-face-dataset/ @sdelbecque @resquito-wmf
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1606406298765-noauth.jpeg", "fullname": "Albert Villanova del Moral", "name": "albertvillanova", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 196, "isFollowing": false }
[]
[ { "avatarUrl": "/avatars/3f3b43b44636bd9363542d640619bea5.svg", "fullname": "Ricardo Esquito", "name": "resquito-wmf", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2 }, { "avatarUrl": "/avatars/2d68ee0d5d2df57df1856c29cdd1f2a8.svg", "fullname": "Stephanie Delbecque", "name": "sdelbecque", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1 } ]
[ { "reaction": "๐Ÿ‘", "users": [ "resquito-wmf", "davanstrien", "not-lain", "ayymen" ], "count": 4 }, { "reaction": "โค๏ธ", "users": [ "davanstrien", "sdelbecque" ], "count": 2 }, { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-09-20T06:30:38.000Z
2024-09-20T06:41:07.090Z
[]
/posts/albertvillanova/962669244664239
1,511
0
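A quick way to inspect the beta dataset mentioned above is to stream a few rows with the `datasets` library. The config name below is a guess modeled on how Wikimedia dumps are usually versioned, and the field names are expectations based on the post's description, so check the dataset card for the actual configs and schema.

```python
# Sketch only: the config name "20240916.en" is assumed, not confirmed by the post;
# see the dataset card for the real config names, fields, and any access terms.
from datasets import load_dataset

ds = load_dataset("wikimedia/structured-wikipedia", "20240916.en",
                  split="train", streaming=True)
article = next(iter(ds))
print(sorted(article.keys()))       # expected: abstract, infobox, sections, main image, ...
print(article.get("abstract"))
```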
756503682140875
[ { "type": "text", "value": "๐Ÿš€ Revolutionary Method to Convert YOLOv8 to On-Device AI with mobile NPU utilizations", "raw": "๐Ÿš€ Revolutionary Method to Convert YOLOv8 to On-Device AI with mobile NPU utilizations", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " Attention AI developers and engineers! ", "raw": " Attention AI developers and engineers! ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Discover how ZETIC.MLange can effortlessly transform YOLOv8 models into on-device AI with mobile NPU utilizations. ๐ŸŽ‰", "raw": "Discover how ZETIC.MLange can effortlessly transform YOLOv8 models into on-device AI with mobile NPU utilizations. ๐ŸŽ‰", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ’ก We highlight the power of mobile NPU, showing how it outperforms CPU in processing speed. The results speak for themselvesโ€”NPU-driven execution is faster, smarter, and more efficient.", "raw": "๐Ÿ’ก We highlight the power of mobile NPU, showing how it outperforms CPU in processing speed. 
The results speak for themselvesโ€”NPU-driven execution is faster, smarter, and more efficient.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "* Real-time demos are no easy feat but with ZETIC.MLange, weโ€™ve made it possible!", "raw": "* Real-time demos are no easy feat but with ZETIC.MLange, weโ€™ve made it possible!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐ŸŽฅ Watch the video to see how weโ€™re revolutionizing on-device AI.", "raw": "๐ŸŽฅ Watch the video to see how weโ€™re revolutionizing on-device AI.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": ": ", "raw": ": ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://youtu.be/LkP3JDTcVN8?si=6Ha5vHA-G7jZE9oq", "href": "https://youtu.be/LkP3JDTcVN8?si=6Ha5vHA-G7jZE9oq", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐ŸŒŸ Our team has successfully implemented YOLOv8 as on-device AI using ZETIC.MLange. This innovative approach enables high-performance object detection across various manufacturers mobile devices.", "raw": "๐ŸŒŸ Our team has successfully implemented YOLOv8 as on-device AI using ZETIC.MLange. This innovative approach enables high-performance object detection across various manufacturers mobile devices.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ” Curious about the details? We've shared a comprehensive guide on our blog. Check it out through the link below!", "raw": "๐Ÿ” Curious about the details? We've shared a comprehensive guide on our blog. 
Check it out through the link below!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ“š Blog link: ", "raw": "๐Ÿ“š Blog link: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://zetic.ai/blog/implementing-yolov8-on-device-ai-with-zetic-mlange", "href": "https://zetic.ai/blog/implementing-yolov8-on-device-ai-with-zetic-mlange", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
๐Ÿš€ Revolutionary Method to Convert YOLOv8 to On-Device AI with mobile NPU utilizations Attention AI developers and engineers! Discover how ZETIC.MLange can effortlessly transform YOLOv8 models into on-device AI with mobile NPU utilizations. ๐ŸŽ‰ ๐Ÿ’ก We highlight the power of mobile NPU, showing how it outperforms CPU in processing speed. The results speak for themselvesโ€”NPU-driven execution is faster, smarter, and more efficient. * Real-time demos are no easy feat but with ZETIC.MLange, weโ€™ve made it possible! ๐ŸŽฅ Watch the video to see how weโ€™re revolutionizing on-device AI. : https://youtu.be/LkP3JDTcVN8?si=6Ha5vHA-G7jZE9oq ๐ŸŒŸ Our team has successfully implemented YOLOv8 as on-device AI using ZETIC.MLange. This innovative approach enables high-performance object detection across various manufacturers mobile devices. ๐Ÿ” Curious about the details? We've shared a comprehensive guide on our blog. Check it out through the link below! ๐Ÿ“š Blog link: https://zetic.ai/blog/implementing-yolov8-on-device-ai-with-zetic-mlange
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/657566a76da136b50faaa48c/EvXVCEchiFsUiuLefhWsT.png", "fullname": "Yeonseok Kim", "name": "yeonseok-zeticai", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 6, "isFollowing": false }
[ { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/657566a76da136b50faaa48c/u45cq1BJHEa6c7NZORY-C.mp4" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/657566a76da136b50faaa48c/3VyKsIGQ3K-tldZHbJMrt.png" } ]
[]
[ { "reaction": "๐Ÿš€", "users": [ "yeonseok-zeticai", "JJen889", "mmoazzamaliashraf", "jeremy-london", "SicarioOtsutsuki", "osanseviero" ], "count": 6 }, { "reaction": "๐Ÿ‘", "users": [ "JJen889", "yeonseok-zeticai", "mmoazzamaliashraf", "jeremy-london", "mmx31", "osanseviero" ], "count": 6 }, { "reaction": "๐Ÿ‘€", "users": [ "JJen889", "yeonseok-zeticai", "John6666", "bluemix", "jeremy-london", "osanseviero" ], "count": 6 }, { "reaction": "๐Ÿ˜Ž", "users": [ "JJen889", "yeonseok-zeticai", "jeremy-london", "osanseviero" ], "count": 4 }, { "reaction": "๐Ÿ”ฅ", "users": [ "JJen889", "yeonseok-zeticai", "jeremy-london" ], "count": 3 }, { "reaction": "๐Ÿค—", "users": [ "yeonseok-zeticai", "jeremy-london" ], "count": 2 }, { "reaction": "โค๏ธ", "users": [ "yeonseok-zeticai", "jeremy-london" ], "count": 2 }, { "reaction": "๐Ÿค", "users": [ "yeonseok-zeticai", "jeremy-london" ], "count": 2 }, { "reaction": "๐Ÿง ", "users": [ "yeonseok-zeticai" ], "count": 1 }, { "reaction": "โž•", "users": [ "yeonseok-zeticai" ], "count": 1 } ]
2024-09-20T05:59:46.000Z
2024-09-20T06:09:27.788Z
[]
/posts/yeonseok-zeticai/756503682140875
1,827
0
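The post does not show the ZETIC.MLange API itself, so it is not reproduced here. As a hedged illustration, the usual first step for any on-device/NPU deployment pipeline is exporting the YOLOv8 PyTorch weights to a portable graph format with Ultralytics' export API, which a vendor toolchain can then compile for the target NPU.

```python
# Illustrative first step only; the ZETIC.MLange-specific conversion and compilation
# calls are not shown in the post and are not reproduced here.
from ultralytics import YOLO

model = YOLO("yolov8n.pt")                               # pretrained YOLOv8-nano checkpoint
exported_path = model.export(format="onnx", imgsz=640)   # "tflite", "coreml", ... also supported
print("Graph ready for downstream NPU compilation:", exported_path)
```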
767021337618853
[ { "type": "text", "value": "๐Ÿš€ Excited to announce the release of InfiMM-WebMath-40B โ€” the largest open-source multimodal pretraining dataset designed to advance mathematical reasoning in AI! ๐Ÿงฎโœจ", "raw": "๐Ÿš€ Excited to announce the release of InfiMM-WebMath-40B โ€” the largest open-source multimodal pretraining dataset designed to advance mathematical reasoning in AI! ๐Ÿงฎโœจ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "With 40 billion tokens, this dataset aims for enhancing the reasoning capabilities of multimodal large language models in the domain of mathematics. ", "raw": "With 40 billion tokens, this dataset aims for enhancing the reasoning capabilities of multimodal large language models in the domain of mathematics. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "If you're interested in MLLMs, AI, and math reasoning, check out our work and dataset: ", "raw": "If you're interested in MLLMs, AI, and math reasoning, check out our work and dataset: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿค— HF: ", "raw": "๐Ÿค— HF: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2409.12568", "href": null, "resource": { "type": "paper", "id": "2409.12568", "discussionNum": null }, "url": "https://huggingface.co/papers/2409.12568", "code": null, "user": null, "label": "InfiMM-WebMath-40B: Advancing Multimodal Pre-Training for Enhanced\n Mathematical Reasoning (2409.12568)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ“‚ Dataset: ", "raw": "๐Ÿ“‚ Dataset: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/Infi-MM/InfiMM-WebMath-40B", "href": null, "resource": { "type": "dataset", "id": "Infi-MM/InfiMM-WebMath-40B", "discussionNum": null }, "url": "https://huggingface.co/datasets/Infi-MM/InfiMM-WebMath-40B", "code": null, "user": null, "label": null, "lang": null } ]
๐Ÿš€ Excited to announce the release of InfiMM-WebMath-40B โ€” the largest open-source multimodal pretraining dataset designed to advance mathematical reasoning in AI! ๐Ÿงฎโœจ With 40 billion tokens, this dataset aims for enhancing the reasoning capabilities of multimodal large language models in the domain of mathematics. If you're interested in MLLMs, AI, and math reasoning, check out our work and dataset: ๐Ÿค— HF: https://huggingface.co/papers/2409.12568 ๐Ÿ“‚ Dataset: https://huggingface.co/datasets/Infi-MM/InfiMM-WebMath-40B
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/650dde4ce14eeb01d42b37a1/n5Yv24uofZ2XJjXdYCrKd.png", "fullname": "Xiaotian Han", "name": "xiaotianhan", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 22, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿš€", "users": [ "xiaotianhan", "John6666", "SLC1404", "osanseviero" ], "count": 4 }, { "reaction": "๐Ÿ”ฅ", "users": [ "Nekuromento" ], "count": 1 } ]
2024-09-20T03:47:59.000Z
2024-09-20T03:47:59.180Z
[]
/posts/xiaotianhan/767021337618853
871
0
296729864819808
[ { "type": "text", "value": "Single Block / Layer FLUX LoRA Training Research Results and LoRA Network Alpha Change Impact With LoRA Network Rank Dimension", "raw": "Single Block / Layer FLUX LoRA Training Research Results and LoRA Network Alpha Change Impact With LoRA Network Rank Dimension", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Full article posted here : ", "raw": "Full article posted here : ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://medium.com/@furkangozukara/single-block-layer-flux-lora-training-research-results-and-lora-network-alpha-change-impact-with-e713cc89c567", "href": "https://medium.com/@furkangozukara/single-block-layer-flux-lora-training-research-results-and-lora-network-alpha-change-impact-with-e713cc89c567", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Conclusions", "raw": "Conclusions", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "As expected, as you train lesse parameters e.g. LoRA vs Full Fine Tuning or Single Blocks LoRA vs all Blocks LoRA, your quality get reduced", "raw": "As expected, as you train lesse parameters e.g. 
LoRA vs Full Fine Tuning or Single Blocks LoRA vs all Blocks LoRA, your quality get reduced", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Of course you earn some extra VRAM memory reduction and also some reduced size on the disk", "raw": "Of course you earn some extra VRAM memory reduction and also some reduced size on the disk", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Moreover, lesser parameters reduces the overfitting and realism of the FLUX model, so if you are into stylized outputs like comic, it may work better", "raw": "Moreover, lesser parameters reduces the overfitting and realism of the FLUX model, so if you are into stylized outputs like comic, it may work better", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Furthermore, when you reduce LoRA Network Rank, keep original Network Alpha unless you are going to do a new Learning Rate research", "raw": "Furthermore, when you reduce LoRA Network Rank, keep original Network Alpha unless you are going to do a new Learning Rate research", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Finally, very best and least overfitting is achieved with full Fine Tuning", "raw": "Finally, very best and least overfitting is achieved with full Fine Tuning", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Check figure 3 and figure 4 last columns โ€” I make extracted LoRA Strength / Weight 1.1 instead of 1.0", "raw": "Check figure 3 and figure 4 last columns โ€” I make extracted LoRA Strength / Weight 1.1 instead of 1.0", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Full fine tuning configs and instructions > ", "raw": "Full fine tuning configs and instructions > ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.patreon.com/posts/112099700", "href": "https://www.patreon.com/posts/112099700", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null 
}, { "type": "text", "value": "Second best one is extracting a LoRA from Fine Tuned model if you need a LoRA", "raw": "Second best one is extracting a LoRA from Fine Tuned model if you need a LoRA", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Check figure 3 and figure 4 last columns โ€” I make extracted LoRA Strength / Weight 1.1 instead of 1.0", "raw": "Check figure 3 and figure 4 last columns โ€” I make extracted LoRA Strength / Weight 1.1 instead of 1.0", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Extract LoRA guide (public article) : ", "raw": "Extract LoRA guide (public article) : ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.patreon.com/posts/112335162", "href": "https://www.patreon.com/posts/112335162", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Third is doing a all layers regular LoRA training", "raw": "Third is doing a all layers regular LoRA training", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Full guide, configs and instructions > ", "raw": "Full guide, configs and instructions > ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.patreon.com/posts/110879657", "href": "https://www.patreon.com/posts/110879657", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "And the worst quality is training lesser blocks / layers with LoRA", "raw": "And the worst quality is training lesser blocks / layers with LoRA", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Full configs are included in > ", "raw": "Full configs are included in > ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.patreon.com/posts/110879657", "href": "https://www.patreon.com/posts/110879657", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, 
"label": null, "lang": null }, { "type": "text", "value": "So how much VRAM and Speed single block LoRA training brings?", "raw": "So how much VRAM and Speed single block LoRA training brings?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "All layers 16 bit is 27700 MB (4.85 second / it) and 1 single block is 25800 MB (3.7 second / it)", "raw": "All layers 16 bit is 27700 MB (4.85 second / it) and 1 single block is 25800 MB (3.7 second / it)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "All layers 8 bit is 17250 MB (4.85 second / it) and 1 single block is 15700 MB (3.8 second / it)", "raw": "All layers 8 bit is 17250 MB (4.85 second / it) and 1 single block is 15700 MB (3.8 second / it)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Image Raw Links", "raw": "Image Raw Links", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Figure 0 : ", "raw": "Figure 0 : ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/MonsterMMORPG/FLUX-Fine-Tuning-Grid-Tests/resolve/main/Figure_0.jfif", "href": null, "resource": { "type": "model", "id": "MonsterMMORPG/FLUX-Fine-Tuning-Grid-Tests", "discussionNum": null }, "url": "https://huggingface.co/MonsterMMORPG/FLUX-Fine-Tuning-Grid-Tests/resolve/main/Figure_0.jfif", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Single Block / Layer FLUX LoRA Training Research Results and LoRA Network Alpha Change Impact With LoRA Network Rank Dimension Full article posted here : https://medium.com/@furkangozukara/single-block-layer-flux-lora-training-research-results-and-lora-network-alpha-change-impact-with-e713cc89c567 Conclusions As expected, as you train fewer parameters, e.g. LoRA vs Full Fine Tuning or Single Blocks LoRA vs all Blocks LoRA, your quality gets reduced Of course you gain some extra VRAM memory reduction and also some reduced size on disk Moreover, fewer parameters reduce the overfitting and realism of the FLUX model, so if you are into stylized outputs like comics, it may work better Furthermore, when you reduce LoRA Network Rank, keep the original Network Alpha unless you are going to do new Learning Rate research Finally, the very best quality and least overfitting are achieved with full Fine Tuning Check figure 3 and figure 4 last columns โ€” I set the extracted LoRA Strength / Weight to 1.1 instead of 1.0 Full fine tuning configs and instructions > https://www.patreon.com/posts/112099700 Second best is extracting a LoRA from the Fine Tuned model if you need a LoRA Check figure 3 and figure 4 last columns โ€” I set the extracted LoRA Strength / Weight to 1.1 instead of 1.0 Extract LoRA guide (public article) : https://www.patreon.com/posts/112335162 Third is doing an all layers regular LoRA training Full guide, configs and instructions > https://www.patreon.com/posts/110879657 And the worst quality is training fewer blocks / layers with LoRA Full configs are included in > https://www.patreon.com/posts/110879657 So how much VRAM and speed benefit does single block LoRA training bring? All layers 16 bit is 27700 MB (4.85 seconds / it) and 1 single block is 25800 MB (3.7 seconds / it) All layers 8 bit is 17250 MB (4.85 seconds / it) and 1 single block is 15700 MB (3.8 seconds / it) Image Raw Links Figure 0 : https://huggingface.co/MonsterMMORPG/FLUX-Fine-Tuning-Grid-Tests/resolve/main/Figure_0.jfif
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1672531901326-6345bd89fe134dfd7a0dba40.png", "fullname": "Furkan Gรถzรผkara", "name": "MonsterMMORPG", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 376, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/Eybpb4QT1h5LfHC10pi9s.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/J0WYjMS8U3OFCVuDndFaC.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/0ppJAyFlTtgfiZrDAjQQW.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/Ew_poEh4Y0EaPrteS_U4f.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/NiUBtLwX0ihen_Hzh_S9d.jpeg" } ]
[]
[ { "reaction": "๐Ÿš€", "users": [ "MonsterMMORPG", "John6666", "jaisanrobert" ], "count": 3 }, { "reaction": "๐Ÿ‘", "users": [ "MonsterMMORPG", "multimodalart" ], "count": 2 }, { "reaction": "๐Ÿคฏ", "users": [ "MonsterMMORPG", "jaisanrobert" ], "count": 2 }, { "reaction": "๐Ÿ”ฅ", "users": [ "MonsterMMORPG" ], "count": 1 }, { "reaction": "๐Ÿ‘€", "users": [ "MonsterMMORPG" ], "count": 1 }, { "reaction": "โค๏ธ", "users": [ "MonsterMMORPG" ], "count": 1 }, { "reaction": "๐Ÿค—", "users": [ "MonsterMMORPG" ], "count": 1 }, { "reaction": "๐Ÿ˜Ž", "users": [ "MonsterMMORPG" ], "count": 1 }, { "reaction": "โž•", "users": [ "MonsterMMORPG" ], "count": 1 }, { "reaction": "๐Ÿง ", "users": [ "MonsterMMORPG" ], "count": 1 }, { "reaction": "๐Ÿค", "users": [ "MonsterMMORPG" ], "count": 1 } ]
2024-09-20T02:28:35.000Z
2024-09-20T02:28:35.492Z
[]
/posts/MonsterMMORPG/296729864819808
1,006
0
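The rank/alpha advice in the post above can be made concrete with a small sketch. peft (like kohya-style trainers) scales every LoRA update by lora_alpha / r, which is why the post recommends keeping the original Network Alpha when you shrink the rank unless you are prepared to re-tune the learning rate. The target_modules names below are illustrative placeholders, not a FLUX-specific training config.

```python
# Illustrative only: shows the lora_alpha / r scaling relationship, not a full FLUX
# training setup; target_modules are placeholder attention projection names.
from peft import LoraConfig

full_rank = LoraConfig(r=128, lora_alpha=128, target_modules=["to_q", "to_k", "to_v"])
reduced   = LoraConfig(r=32,  lora_alpha=128, target_modules=["to_q", "to_k", "to_v"])

for cfg in (full_rank, reduced):
    print(f"rank={cfg.r:>4}  alpha={cfg.lora_alpha:>4}  update scale={cfg.lora_alpha / cfg.r:.2f}")
```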
374015631171379
[ { "type": "mention", "value": null, "raw": "@JonSteinworth", "href": null, "resource": null, "url": null, "code": null, "user": "JonSteinworth", "label": null, "lang": null }, { "type": "text", "value": " please acces ", "raw": " please acces ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/shainar/BEAD", "href": null, "resource": { "type": "dataset", "id": "shainar/BEAD", "discussionNum": null }, "url": "https://huggingface.co/datasets/shainar/BEAD", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "All these data pieces are there ", "raw": "All these data pieces are there ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
@JonSteinworth please access https://huggingface.co/datasets/shainar/BEAD All these data pieces are there
{ "avatarUrl": "/avatars/951e272ffccf2388f138b248e5ef7142.svg", "fullname": "Shaina Raza", "name": "shainar", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 3, "isFollowing": false }
[]
[ { "avatarUrl": "/avatars/ed0a94ac6afa98520eda02f108575dd7.svg", "fullname": "JonSteinworth", "name": "JonSteinworth", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null } ]
[]
2024-09-19T23:49:30.000Z
2024-09-19T23:49:30.089Z
[]
/posts/shainar/374015631171379
430
0
362784568538408
[ { "type": "text", "value": "Backlink Profile Concept: \"A clean, modern vector illustration showing a website surrounded by various interconnected links representing a backlink profile, with arrows pointing to it from other authority websites.\"", "raw": "Backlink Profile Concept: \"A clean, modern vector illustration showing a website surrounded by various interconnected links representing a backlink profile, with arrows pointing to it from other authority websites.\"", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Backlink Profile Concept: "A clean, modern vector illustration showing a website surrounded by various interconnected links representing a backlink profile, with arrows pointing to it from other authority websites."
{ "avatarUrl": "/avatars/070c2b4d84796d2d0f556f3b0ea80cd8.svg", "fullname": "makana", "name": "posrs", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }
[]
[]
[]
2024-09-19T23:15:18.000Z
2024-09-19T23:15:18.155Z
[]
/posts/posrs/362784568538408
326
0
277888164967463
[ { "type": "text", "value": "today's release: the updated Supernova general chat dataset!", "raw": "today's release: the updated Supernova general chat dataset!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- the new Supernova is 2x the rows, continuing to provide high quality general synthetic data generated with Llama 405b Instruct.", "raw": "- the new Supernova is 2x the rows, continuing to provide high quality general synthetic data generated with Llama 405b Instruct.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Find it at ", "raw": "Find it at ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/sequelbox/Supernova", "href": null, "resource": { "type": "dataset", "id": "sequelbox/Supernova", "discussionNum": null }, "url": "https://huggingface.co/datasets/sequelbox/Supernova", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Enjoy! There's also a new version of ", "raw": "Enjoy! There's also a new version of ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/sequelbox/Llama3.1-8B-MOTH", "href": null, "resource": { "type": "model", "id": "sequelbox/Llama3.1-8B-MOTH", "discussionNum": null }, "url": "https://huggingface.co/sequelbox/Llama3.1-8B-MOTH", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " available using the new dataset. (new and better MOTHs for other models will come as well, but the Build Tools and Shining Valiant take priority.)", "raw": " available using the new dataset. (new and better MOTHs for other models will come as well, but the Build Tools and Shining Valiant take priority.)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
today's release: the updated Supernova general chat dataset! - the new Supernova is 2x the rows, continuing to provide high quality general synthetic data generated with Llama 405b Instruct. Find it at https://huggingface.co/datasets/sequelbox/Supernova Enjoy! There's also a new version of https://huggingface.co/sequelbox/Llama3.1-8B-MOTH available using the new dataset. (new and better MOTHs for other models will come as well, but the Build Tools and Shining Valiant take priority.)
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63444f2687964b331809eb55/WvZivsvKsM_t0tBtakovK.png", "fullname": "t.d.a.g.", "name": "sequelbox", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 51, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-09-19T22:31:14.000Z
2024-09-19T22:31:14.419Z
[]
/posts/sequelbox/277888164967463
354
0
466987993068050
[ { "type": "text", "value": "๐ŸŽ“Introducing ruschatgpt Q&A Dataset - ", "raw": "๐ŸŽ“Introducing ruschatgpt Q&A Dataset - ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/nyuuzyou/ruschatgpt-qa", "href": null, "resource": { "type": "dataset", "id": "nyuuzyou/ruschatgpt-qa", "discussionNum": null }, "url": "https://huggingface.co/datasets/nyuuzyou/ruschatgpt-qa", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Dataset highlights:", "raw": "Dataset highlights:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- 190,281 question-answer pairs from ruschatgpt.ru, a Russian question-answering website", "raw": "- 190,281 question-answer pairs from ruschatgpt.ru, a Russian question-answering website", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Monolingual content in Russian", "raw": "- Monolingual content in Russian", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Each entry includes: URL, question, and response", "raw": "- Each entry includes: URL, question, and response", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Data reflects user-generated questions and language model-generated answers", "raw": "- Data reflects user-generated questions and language model-generated answers", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Licensed under Creative Commons Zero (CC0) for unrestricted use", "raw": "- Licensed under Creative Commons Zero (CC0) for unrestricted use", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The dataset can be used for analyzing 
trends in AI-powered question answering in Russia. It's also valuable for examining Russian language patterns and topic distributions in user queries and AI responses.", "raw": "The dataset can be used for analyzing trends in AI-powered question answering in Russia. It's also valuable for examining Russian language patterns and topic distributions in user queries and AI responses.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
๐ŸŽ“Introducing ruschatgpt Q&A Dataset - https://huggingface.co/datasets/nyuuzyou/ruschatgpt-qa Dataset highlights: - 190,281 question-answer pairs from ruschatgpt.ru, a Russian question-answering website - Monolingual content in Russian - Each entry includes: URL, question, and response - Data reflects user-generated questions and language model-generated answers - Licensed under Creative Commons Zero (CC0) for unrestricted use The dataset can be used for analyzing trends in AI-powered question answering in Russia. It's also valuable for examining Russian language patterns and topic distributions in user queries and AI responses.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/643ac5d2e2b979ae6144d68c/Z7PCNopn4cQeAYnVJDoqG.png", "fullname": "nyuuzyou", "name": "nyuuzyou", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 57, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-09-19T20:31:26.000Z
2024-09-19T20:31:26.997Z
[]
/posts/nyuuzyou/466987993068050
365
0
495312490660224
[ { "type": "text", "value": "Good folks from VILA Lab at Mohamed bin Zayed University of AI have introduced 26 guiding principles for optimizing prompts when interacting with large language models (LLMs) like LLaMA and GPT.", "raw": "Good folks from VILA Lab at Mohamed bin Zayed University of AI have introduced 26 guiding principles for optimizing prompts when interacting with large language models (LLMs) like LLaMA and GPT.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "These principles aim to enhance LLM response quality, accuracy, and task alignment across various scales of models.", "raw": "These principles aim to enhance LLM response quality, accuracy, and task alignment across various scales of models.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1. Be direct and concise, avoiding unnecessary politeness.", "raw": "1. Be direct and concise, avoiding unnecessary politeness.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "2. Specify the intended audience.", "raw": "2. Specify the intended audience.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "3. Break complex tasks into simpler steps.", "raw": "3. Break complex tasks into simpler steps.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "4. Use affirmative directives instead of negative language.", "raw": "4. Use affirmative directives instead of negative language.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "5. Request explanations in simple terms for clarity.", "raw": "5. Request explanations in simple terms for clarity.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "6. 
Mention a potential reward for better solutions.", "raw": "6. Mention a potential reward for better solutions.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "7. Provide examples to guide responses.", "raw": "7. Provide examples to guide responses.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "8. Use consistent formatting and structure.", "raw": "8. Use consistent formatting and structure.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "9. Clearly state tasks and requirements.", "raw": "9. Clearly state tasks and requirements.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "10. Mention potential penalties for incorrect responses.", "raw": "10. Mention potential penalties for incorrect responses.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "11. Request natural, human-like answers.", "raw": "11. Request natural, human-like answers.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "12. Encourage step-by-step thinking.", "raw": "12. Encourage step-by-step thinking.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "13. Ask for unbiased responses without stereotypes.", "raw": "13. Ask for unbiased responses without stereotypes.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "14. Allow the model to ask clarifying questions.", "raw": "14. Allow the model to ask clarifying questions.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "15. 
Request explanations with self-tests.", "raw": "15. Request explanations with self-tests.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "16. Assign specific roles to the model.", "raw": "16. Assign specific roles to the model.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "17. Use delimiters to separate sections.", "raw": "17. Use delimiters to separate sections.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "18. Repeat key words or phrases for emphasis.", "raw": "18. Repeat key words or phrases for emphasis.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "19. Combine chain-of-thought with few-shot prompts.", "raw": "19. Combine chain-of-thought with few-shot prompts.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "20. Use output primers to guide responses.", "raw": "20. Use output primers to guide responses.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "21. Request detailed responses on specific topics.", "raw": "21. Request detailed responses on specific topics.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "22. Specify how to revise or improve text.", "raw": "22. Specify how to revise or improve text.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "23. Provide instructions for generating multi-file code.", "raw": "23. Provide instructions for generating multi-file code.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "24. 
Give specific starting points for text generation.", "raw": "24. Give specific starting points for text generation.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "25. Clearly state content requirements and guidelines.", "raw": "25. Clearly state content requirements and guidelines.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "26. Request responses similar to provided examples.", "raw": "26. Request responses similar to provided examples.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Results show significant improvements in both \"boosting\" (response quality enhancement) and \"correctness\" across different model scales. Using the ATLAS benchmark, specialized prompts improved response quality and accuracy by an average of 57.7% and 67.3%, respectively, when applied to GPT-4.", "raw": "Results show significant improvements in both \"boosting\" (response quality enhancement) and \"correctness\" across different model scales. Using the ATLAS benchmark, specialized prompts improved response quality and accuracy by an average of 57.7% and 67.3%, respectively, when applied to GPT-4.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Good folks from VILA Lab at Mohamed bin Zayed University of AI have introduced 26 guiding principles for optimizing prompts when interacting with large language models (LLMs) like LLaMA and GPT. These principles aim to enhance LLM response quality, accuracy, and task alignment across various scales of models. 1. Be direct and concise, avoiding unnecessary politeness. 2. Specify the intended audience. 3. Break complex tasks into simpler steps. 4. Use affirmative directives instead of negative language. 5. Request explanations in simple terms for clarity. 6. Mention a potential reward for better solutions. 7. Provide examples to guide responses. 8. Use consistent formatting and structure. 9. Clearly state tasks and requirements. 10. Mention potential penalties for incorrect responses. 11. Request natural, human-like answers. 12. Encourage step-by-step thinking. 13. Ask for unbiased responses without stereotypes. 14. Allow the model to ask clarifying questions. 15. Request explanations with self-tests. 16. Assign specific roles to the model. 17. Use delimiters to separate sections. 18. Repeat key words or phrases for emphasis. 19. Combine chain-of-thought with few-shot prompts. 20. Use output primers to guide responses. 21. Request detailed responses on specific topics. 22. Specify how to revise or improve text. 23. Provide instructions for generating multi-file code. 24. Give specific starting points for text generation. 25. Clearly state content requirements and guidelines. 26. Request responses similar to provided examples. Results show significant improvements in both "boosting" (response quality enhancement) and "correctness" across different model scales. Using the ATLAS benchmark, specialized prompts improved response quality and accuracy by an average of 57.7% and 67.3%, respectively, when applied to GPT-4.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/662bf5bfe93bb73804ef9344/WXYLnjjJ4SROkoveIi7If.png", "fullname": "Kuldeep Singh Sidhu", "name": "singhsidhukuldeep", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 219, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/662bf5bfe93bb73804ef9344/6mYXoNnIIzJqwdHcrDNYl.jpeg" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "SLC1404", "louisbrulenaudet" ], "count": 3 } ]
2024-09-19T20:19:41.000Z
2024-09-19T20:19:41.468Z
[]
/posts/singhsidhukuldeep/495312490660224
358
0
664056222467611
[ { "type": "text", "value": "Inference Endpoints got a bunch of cool updates yesterday, this is my top 3", "raw": "Inference Endpoints got a bunch of cool updates yesterday, this is my top 3", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Inference Endpoints got a bunch of cool updates yesterday, this is my top 3
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1605114051380-noauth.jpeg", "fullname": "Jeff Boudier", "name": "jeffboudier", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 195, "isFollowing": false }
[ { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/5fac18fb5eec0323e9470ba2/0uORfjRSR2Ant-gXrzsqu.mp4" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "osanseviero" ], "count": 2 } ]
2024-09-19T19:18:22.000Z
2024-09-19T19:18:22.265Z
[]
/posts/jeffboudier/664056222467611
447
0
316708748461696
[ { "type": "text", "value": "๐Ÿš€ Excited to share the latest update to the Notebook Creator Tool!", "raw": "๐Ÿš€ Excited to share the latest update to the Notebook Creator Tool!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Now with basic fine-tuning support using Supervised Fine-Tuning! ๐ŸŽฏ", "raw": "Now with basic fine-tuning support using Supervised Fine-Tuning! ๐ŸŽฏ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "How it works: ", "raw": "How it works: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1๏ธโƒฃ Choose your Hugging Face dataset and notebook type (SFT)", "raw": "1๏ธโƒฃ Choose your Hugging Face dataset and notebook type (SFT)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "2๏ธโƒฃ Automatically generate your training notebook ", "raw": "2๏ธโƒฃ Automatically generate your training notebook ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "3๏ธโƒฃ Start fine-tuning with your data!", "raw": "3๏ธโƒฃ Start fine-tuning with your data!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Link to the app ๐Ÿ‘‰ ", "raw": "Link to the app ๐Ÿ‘‰ ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://lnkd.in/e_3nmWrB", "href": "https://lnkd.in/e_3nmWrB", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ’ก Want to contribute with new notebooks? 
๐Ÿ‘‰https://lnkd.in/eWcZ92dS", "raw": "๐Ÿ’ก Want to contribute with new notebooks? ๐Ÿ‘‰https://lnkd.in/eWcZ92dS", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
๐Ÿš€ Excited to share the latest update to the Notebook Creator Tool! Now with basic fine-tuning support using Supervised Fine-Tuning! ๐ŸŽฏ How it works: 1๏ธโƒฃ Choose your Hugging Face dataset and notebook type (SFT) 2๏ธโƒฃ Automatically generate your training notebook 3๏ธโƒฃ Start fine-tuning with your data! Link to the app ๐Ÿ‘‰ https://lnkd.in/e_3nmWrB ๐Ÿ’ก Want to contribute with new notebooks? ๐Ÿ‘‰https://lnkd.in/eWcZ92dS
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1674055965173-noauth.jpeg", "fullname": "Andrea Soria", "name": "asoria", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 61, "isFollowing": false }
[ { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/63c8113f46421a2efe7f067e/ScnJSLVUdRZPCY9yrfAfa.mp4" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "not-lain", "djuna" ], "count": 3 } ]
2024-09-19T16:21:48.000Z
2024-09-19T16:21:48.364Z
[]
/posts/asoria/316708748461696
958
0
321766557032408
[ { "type": "text", "value": "What are your favorite text chunkers/splitters? ", "raw": "What are your favorite text chunkers/splitters? ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Mine are: ", "raw": "Mine are: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- ", "raw": "- ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/benbrandt/text-splitter", "href": "https://github.com/benbrandt/text-splitter", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " (Rust/Python, battle-tested, Wasm version coming soon) ", "raw": " (Rust/Python, battle-tested, Wasm version coming soon) ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- ", "raw": "- ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/umarbutler/semchunk", "href": "https://github.com/umarbutler/semchunk", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " (Python, really performant but some issues with huge docs)", "raw": " (Python, really performant but some issues with huge docs)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I tried the huge Jina AI regex, but it failed for my (admittedly messy) documents, e.g. from EUR-LEX. Their free segmenter API is really cool but unfortunately times out on my huge docs (~100 pages): ", "raw": "I tried the huge Jina AI regex, but it failed for my (admittedly messy) documents, e.g. from EUR-LEX. 
Their free segmenter API is really cool but unfortunately times out on my huge docs (~100 pages): ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://jina.ai/segmenter/", "href": "https://jina.ai/segmenter/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Also, I tried to write a Vanilla JS chunker with a simple, adjustable hierarchical logic (inspired from the above). I think it does a decent job for the few lines of code: ", "raw": "Also, I tried to write a Vanilla JS chunker with a simple, adjustable hierarchical logic (inspired from the above). I think it does a decent job for the few lines of code: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://do-me.github.io/js-text-chunker/", "href": "https://do-me.github.io/js-text-chunker/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Happy to hear your thoughts! ", "raw": "Happy to hear your thoughts! ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
What are your favorite text chunkers/splitters? Mine are: - https://github.com/benbrandt/text-splitter (Rust/Python, battle-tested, Wasm version coming soon) - https://github.com/umarbutler/semchunk (Python, really performant but some issues with huge docs) I tried the huge Jina AI regex, but it failed for my (admittedly messy) documents, e.g. from EUR-LEX. Their free segmenter API is really cool but unfortunately times out on my huge docs (~100 pages): https://jina.ai/segmenter/ Also, I tried to write a Vanilla JS chunker with a simple, adjustable hierarchical logic (inspired from the above). I think it does a decent job for the few lines of code: https://do-me.github.io/js-text-chunker/ Happy to hear your thoughts!
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/IiercF_qxHWize2kitl9X.jpeg", "fullname": "Dominik Weckmรผller", "name": "do-me", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 38, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "cschroeder", "John6666" ], "count": 2 }, { "reaction": "โค๏ธ", "users": [ "Felladrin" ], "count": 1 } ]
2024-09-19T15:09:00.000Z
2024-09-19T18:14:33.724Z
[ { "avatarUrl": "/avatars/f32291df2054c1bb4a01889d1b41c0d5.svg", "fullname": "Christopher Schrรถder", "name": "cschroeder", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 18, "isFollowing": false } ]
/posts/do-me/321766557032408
1,030
1
663661698988100
[ { "type": "text", "value": "๐Ÿš€ Your AI toolkit just got a major upgrade! I updated the Journalists on Hugging Face community's collection with tools for investigative work, content creation, and data analysis.", "raw": "๐Ÿš€ Your AI toolkit just got a major upgrade! I updated the Journalists on Hugging Face community's collection with tools for investigative work, content creation, and data analysis.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Sharing these new additions with the links in case itโ€™s helpful:", "raw": "Sharing these new additions with the links in case itโ€™s helpful:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- ", "raw": "- ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@wendys-llc", "href": null, "resource": null, "url": null, "code": null, "user": "wendys-llc", "label": null, "lang": null }, { "type": "text", "value": "'s excellent 6-part video series on AI for investigative journalism ", "raw": "'s excellent 6-part video series on AI for investigative journalism ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.youtube.com/playlist?list=PLewNEVDy7gq1_GPUaL0OQ31QsiHP5ncAQ", "href": "https://www.youtube.com/playlist?list=PLewNEVDy7gq1_GPUaL0OQ31QsiHP5ncAQ", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- ", "raw": "- ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@jeremycaplan", "href": null, "resource": null, "url": null, "code": null, "user": "jeremycaplan", "label": null, "lang": null }, { "type": "text", "value": "'s curated AI Spaces on HF ", "raw": "'s curated AI Spaces on HF ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://wondertools.substack.com/p/huggingface", "href": "https://wondertools.substack.com/p/huggingface", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- ", "raw": "- ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@Xenova", "href": null, "resource": null, "url": null, "code": null, "user": "Xenova", "label": null, "lang": null }, { "type": "text", "value": 
"'s Whisper Timestamped (with diarization!) for private, on-device transcription ", "raw": "'s Whisper Timestamped (with diarization!) for private, on-device transcription ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/Xenova/whisper-speaker-diarization", "href": null, "resource": { "type": "space", "id": "Xenova/whisper-speaker-diarization", "discussionNum": null }, "url": "https://huggingface.co/spaces/Xenova/whisper-speaker-diarization", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " & ", "raw": " & ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/Xenova/whisper-word-level-timestamps", "href": null, "resource": { "type": "space", "id": "Xenova/whisper-word-level-timestamps", "discussionNum": null }, "url": "https://huggingface.co/spaces/Xenova/whisper-word-level-timestamps", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Flux models for image gen & LoRAs ", "raw": "- Flux models for image gen & LoRAs ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/autotrain-projects/train-flux-lora-ease", "href": null, "resource": { "type": "space", "id": "autotrain-projects/train-flux-lora-ease", "discussionNum": null }, "url": "https://huggingface.co/spaces/autotrain-projects/train-flux-lora-ease", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- FineGrain's object cutter ", "raw": "- FineGrain's object cutter ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/finegrain/finegrain-object-cutter", "href": null, "resource": { "type": "space", "id": "finegrain/finegrain-object-cutter", "discussionNum": null }, "url": "https://huggingface.co/spaces/finegrain/finegrain-object-cutter", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " and object eraser (this one's cool) ", "raw": " and object eraser (this one's cool) ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/finegrain/finegrain-object-eraser", "href": null, "resource": { "type": "space", "id": "finegrain/finegrain-object-eraser", "discussionNum": null }, "url": "https://huggingface.co/spaces/finegrain/finegrain-object-eraser", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- FineVideo: massive open-source annotated dataset + explorer ", "raw": "- FineVideo: massive open-source annotated dataset + explorer ", "href": null, "resource": null, "url": 
null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/HuggingFaceFV/FineVideo-Explorer", "href": null, "resource": { "type": "space", "id": "HuggingFaceFV/FineVideo-Explorer", "discussionNum": null }, "url": "https://huggingface.co/spaces/HuggingFaceFV/FineVideo-Explorer", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Qwen2 chat demos, including 2.5 & multimodal versions (crushing it on handwriting recognition) ", "raw": "- Qwen2 chat demos, including 2.5 & multimodal versions (crushing it on handwriting recognition) ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/Qwen/Qwen2.5", "href": null, "resource": { "type": "space", "id": "Qwen/Qwen2.5", "discussionNum": null }, "url": "https://huggingface.co/spaces/Qwen/Qwen2.5", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " & ", "raw": " & ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/Qwen/Qwen2-VL", "href": null, "resource": { "type": "space", "id": "Qwen/Qwen2-VL", "discussionNum": null }, "url": "https://huggingface.co/spaces/Qwen/Qwen2-VL", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- GOT-OCR integration ", "raw": "- GOT-OCR integration ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/stepfun-ai/GOT_official_online_demo", "href": null, "resource": { "type": "space", "id": "stepfun-ai/GOT_official_online_demo", "discussionNum": null }, "url": "https://huggingface.co/spaces/stepfun-ai/GOT_official_online_demo", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- HTML to Markdown converter ", "raw": "- HTML to Markdown converter ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/maxiw/HTML-to-Markdown", "href": null, "resource": { "type": "space", "id": "maxiw/HTML-to-Markdown", "discussionNum": null }, "url": "https://huggingface.co/spaces/maxiw/HTML-to-Markdown", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Text-to-SQL query tool by ", "raw": "- Text-to-SQL query tool by ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@davidberenstein1957", "href": null, "resource": null, "url": null, "code": null, "user": 
"davidberenstein1957", "label": null, "lang": null }, { "type": "text", "value": " for HF datasets ", "raw": " for HF datasets ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/davidberenstein1957/text-to-sql-hub-datasets", "href": null, "resource": { "type": "space", "id": "davidberenstein1957/text-to-sql-hub-datasets", "discussionNum": null }, "url": "https://huggingface.co/spaces/davidberenstein1957/text-to-sql-hub-datasets", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "There's a lot of potential here for journalism and beyond. Give these a try and let me know what you build! ", "raw": "There's a lot of potential here for journalism and beyond. Give these a try and let me know what you build! ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "You can also add your favorite ones if you're part of the community!", "raw": "You can also add your favorite ones if you're part of the community!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Check it out: ", "raw": "Check it out: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/JournalistsonHF", "href": "https://huggingface.co/JournalistsonHF", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "#AIforJournalism #HuggingFace #OpenSourceAI", "raw": "#AIforJournalism #HuggingFace #OpenSourceAI", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
๐Ÿš€ Your AI toolkit just got a major upgrade! I updated the Journalists on Hugging Face community's collection with tools for investigative work, content creation, and data analysis. Sharing these new additions with the links in case itโ€™s helpful: - @wendys-llc's excellent 6-part video series on AI for investigative journalism https://www.youtube.com/playlist?list=PLewNEVDy7gq1_GPUaL0OQ31QsiHP5ncAQ - @jeremycaplan's curated AI Spaces on HF https://wondertools.substack.com/p/huggingface - @Xenova's Whisper Timestamped (with diarization!) for private, on-device transcription https://huggingface.co/spaces/Xenova/whisper-speaker-diarization & https://huggingface.co/spaces/Xenova/whisper-word-level-timestamps - Flux models for image gen & LoRAs https://huggingface.co/spaces/autotrain-projects/train-flux-lora-ease - FineGrain's object cutter https://huggingface.co/spaces/finegrain/finegrain-object-cutter and object eraser (this one's cool) https://huggingface.co/spaces/finegrain/finegrain-object-eraser - FineVideo: massive open-source annotated dataset + explorer https://huggingface.co/spaces/HuggingFaceFV/FineVideo-Explorer - Qwen2 chat demos, including 2.5 & multimodal versions (crushing it on handwriting recognition) https://huggingface.co/spaces/Qwen/Qwen2.5 & https://huggingface.co/spaces/Qwen/Qwen2-VL - GOT-OCR integration https://huggingface.co/spaces/stepfun-ai/GOT_official_online_demo - HTML to Markdown converter https://huggingface.co/spaces/maxiw/HTML-to-Markdown - Text-to-SQL query tool by @davidberenstein1957 for HF datasets https://huggingface.co/spaces/davidberenstein1957/text-to-sql-hub-datasets There's a lot of potential here for journalism and beyond. Give these a try and let me know what you build! You can also add your favorite ones if you're part of the community! Check it out: https://huggingface.co/JournalistsonHF #AIforJournalism #HuggingFace #OpenSourceAI
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/647f36a8454af0237bd49574/jshkqBUTY-GZL8As8y6Aq.jpeg", "fullname": "Florent Daudens", "name": "fdaudens", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 384, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/647f36a8454af0237bd49574/z-vtHAr-Jd-ljZ8JqTKAG.png" } ]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1677141720071-634ff41ff32062e9eb7b06a3.jpeg", "fullname": "David Berenstein", "name": "davidberenstein1957", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 167 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/667b3035e005e1dbcc777058/xMraPsyxCJxR4CUdaRJyl.jpeg", "fullname": "Jeremy Caplan", "name": "jeremycaplan", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 8 }, { "avatarUrl": "/avatars/757528cee00457f61c09296e48d5a106.svg", "fullname": "j soma", "name": "wendys-llc", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 6 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/61b253b7ac5ecaae3d1efe0c/hwiQ0uvz3t-L5a-NtBIO6.png", "fullname": "Joshua", "name": "Xenova", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 3792 } ]
[ { "reaction": "๐Ÿš€", "users": [ "John6666", "Taylor658", "louisbrulenaudet" ], "count": 3 }, { "reaction": "๐Ÿ‘", "users": [ "fffiloni", "jeremycaplan" ], "count": 2 } ]
2024-09-19T13:10:01.000Z
2024-09-19T13:10:01.028Z
[]
/posts/fdaudens/663661698988100
854
0
829049751921028
[ { "type": "text", "value": "This issue is just a treasure ! A bit deprecated i guess, but things are in their historical context. (personally, still need more to understand better)", "raw": "This issue is just a treasure ! A bit deprecated i guess, but things are in their historical context. (personally, still need more to understand better)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/huggingface/transformers/issues/8771", "href": "https://github.com/huggingface/transformers/issues/8771", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿซก to the man ", "raw": "๐Ÿซก to the man ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@stas", "href": null, "resource": null, "url": null, "code": null, "user": "stas", "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
This issue is just a treasure ! A bit deprecated i guess, but things are in their historical context. (personally, still need more to understand better) https://github.com/huggingface/transformers/issues/8771 ๐Ÿซก to the man @stas
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/626237d9bbcbd1c34f1bb231/EJrOjvAL-68qMCYdnvOrq.png", "fullname": "Ali El Filali", "name": "alielfilali01", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 186, "isFollowing": false }
[]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1594311341799-5f07383b19cb630495b812cd.jpeg", "fullname": "Stas Bekman", "name": "stas", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 97 } ]
[ { "reaction": "๐Ÿง ", "users": [ "John6666" ], "count": 1 } ]
2024-09-19T11:33:45.000Z
2024-09-19T11:33:45.078Z
[]
/posts/alielfilali01/829049751921028
374
0
875775738519407
[ { "type": "text", "value": "๐ŸŽ‰SetFit v1.1.0 is out! Training efficient classifiers on CPU or GPU now uses the Sentence Transformers Trainer, and we resolved a lot of issues caused by updates of third-party libraries (like Transformers). Details:", "raw": "๐ŸŽ‰SetFit v1.1.0 is out! Training efficient classifiers on CPU or GPU now uses the Sentence Transformers Trainer, and we resolved a lot of issues caused by updates of third-party libraries (like Transformers). Details:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Training a SetFit classifier model consists of 2 phases: ", "raw": "Training a SetFit classifier model consists of 2 phases: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1. Finetuning a Sentence Transformer embedding model", "raw": "1. Finetuning a Sentence Transformer embedding model", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "2. Training a Classifier to map embeddings -> classes", "raw": "2. Training a Classifier to map embeddings -> classes", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ”ŒThe first phase now uses the SentenceTransformerTrainer that was introduced in the Sentence Transformers v3 update. This brings some immediate upsides like MultiGPU support, without any (intended) breaking changes.", "raw": "๐Ÿ”ŒThe first phase now uses the SentenceTransformerTrainer that was introduced in the Sentence Transformers v3 update. This brings some immediate upsides like MultiGPU support, without any (intended) breaking changes.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โžก๏ธ Beyond that, we softly deprecated the \"evaluation_strategy\" argument in favor of \"eval_strategy\" (following a Transformers deprecation), and deprecated Python 3.7. 
In return, we add official support for Python 3.11 and 3.12.", "raw": "โžก๏ธ Beyond that, we softly deprecated the \"evaluation_strategy\" argument in favor of \"eval_strategy\" (following a Transformers deprecation), and deprecated Python 3.7. In return, we add official support for Python 3.11 and 3.12.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โœจ There's some more minor changes too, like max_steps and eval_max_steps now being a hard limit instead of an approximate one, training/validation losses now logging nicely in Notebooks, and the \"device\" parameter no longer being ignored in some situations.", "raw": "โœจ There's some more minor changes too, like max_steps and eval_max_steps now being a hard limit instead of an approximate one, training/validation losses now logging nicely in Notebooks, and the \"device\" parameter no longer being ignored in some situations.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Check out the full release notes here: ", "raw": "Check out the full release notes here: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/huggingface/setfit/releases/tag/v1.1.0", "href": "https://github.com/huggingface/setfit/releases/tag/v1.1.0", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Or read the documentation: ", "raw": "Or read the documentation: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/docs/setfit", "href": "https://huggingface.co/docs/setfit", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Or check out the public SetFit models for inspiration: ", "raw": "Or check out the public SetFit models for inspiration: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/models?library=setfit&sort=created", "href": "https://huggingface.co/models?library=setfit&sort=created", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": 
null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "P.s. the model in the code snippet trained in 1 minute and it can classify ~6000 sentences per second on my GPU.", "raw": "P.s. the model in the code snippet trained in 1 minute and it can classify ~6000 sentences per second on my GPU.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
๐ŸŽ‰SetFit v1.1.0 is out! Training efficient classifiers on CPU or GPU now uses the Sentence Transformers Trainer, and we resolved a lot of issues caused by updates of third-party libraries (like Transformers). Details: Training a SetFit classifier model consists of 2 phases: 1. Finetuning a Sentence Transformer embedding model 2. Training a Classifier to map embeddings -> classes ๐Ÿ”ŒThe first phase now uses the SentenceTransformerTrainer that was introduced in the Sentence Transformers v3 update. This brings some immediate upsides like MultiGPU support, without any (intended) breaking changes. โžก๏ธ Beyond that, we softly deprecated the "evaluation_strategy" argument in favor of "eval_strategy" (following a Transformers deprecation), and deprecated Python 3.7. In return, we add official support for Python 3.11 and 3.12. โœจ There's some more minor changes too, like max_steps and eval_max_steps now being a hard limit instead of an approximate one, training/validation losses now logging nicely in Notebooks, and the "device" parameter no longer being ignored in some situations. Check out the full release notes here: https://github.com/huggingface/setfit/releases/tag/v1.1.0 Or read the documentation: https://huggingface.co/docs/setfit Or check out the public SetFit models for inspiration: https://huggingface.co/models?library=setfit&sort=created P.s. the model in the code snippet trained in 1 minute and it can classify ~6000 sentences per second on my GPU.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6317233cc92fd6fee317e030/cJHSvvimr1kqgQfHOjO5n.png", "fullname": "Tom Aarsen", "name": "tomaarsen", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 1060, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6317233cc92fd6fee317e030/rAU42B5KEK4pivcLgJb_4.png" } ]
[]
[ { "reaction": "โค๏ธ", "users": [ "lbourdois", "trollek", "AtAndDev", "privategeek24", "Bruhn", "osanseviero", "louisbrulenaudet" ], "count": 7 }, { "reaction": "๐Ÿ‘€", "users": [ "John6666", "lbourdois", "AtAndDev", "osanseviero" ], "count": 4 }, { "reaction": "๐Ÿ”ฅ", "users": [ "lbourdois", "AtAndDev", "cnmoro", "osanseviero" ], "count": 4 }, { "reaction": "๐Ÿš€", "users": [ "lbourdois", "AtAndDev" ], "count": 2 } ]
2024-09-19T10:33:11.000Z
2024-09-19T10:33:11.681Z
[]
/posts/tomaarsen/875775738519407
2024
0
282477100737560
[ { "type": "text", "value": "Why would you fine-tune a model if you can just prompt an LLM? The new paper \"What is the Role of Small Models in the LLM Era: A Survey\" provides a nice pro/con overview. My go-to approach combines both:", "raw": "Why would you fine-tune a model if you can just prompt an LLM? The new paper \"What is the Role of Small Models in the LLM Era: A Survey\" provides a nice pro/con overview. My go-to approach combines both:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1. Start testing an idea by prompting an LLM/VLM behind an API. It's fast and easy and I avoid wasting time on tuning a model on a task that might not make it into production anyways. ", "raw": "1. Start testing an idea by prompting an LLM/VLM behind an API. It's fast and easy and I avoid wasting time on tuning a model on a task that might not make it into production anyways. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "2. The LLM/VLM then needs to be manually validated. Anyone seriously considering putting AI into production has to do at least some manual validation. Setting up a good validation pipeline with a tool like Argilla is crucial and it can be reused for any future experiments. Note: you can use LLM-as-a-judge to automate some evals, but you always also need to validate the judge!", "raw": "2. The LLM/VLM then needs to be manually validated. Anyone seriously considering putting AI into production has to do at least some manual validation. Setting up a good validation pipeline with a tool like Argilla is crucial and it can be reused for any future experiments. Note: you can use LLM-as-a-judge to automate some evals, but you always also need to validate the judge!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "3. Based on this validation I can then (a) either just continue using the prompted LLM if it is accurate enough and it makes sense financially given my load; or (b) if the LLM is not accurate enough or too expensive to run in the long-run, I reuse the existing validation pipeline to annotate some additional data for fine-tuning a smaller model. This can be sped up by reusing & correcting synthetic data from the LLM (or just pure distillation).", "raw": "3. 
Based on this validation I can then (a) either just continue using the prompted LLM if it is accurate enough and it makes sense financially given my load; or (b) if the LLM is not accurate enough or too expensive to run in the long-run, I reuse the existing validation pipeline to annotate some additional data for fine-tuning a smaller model. This can be sped up by reusing & correcting synthetic data from the LLM (or just pure distillation).", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Paper: ", "raw": "Paper: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://arxiv.org/pdf/2409.06857", "href": "https://arxiv.org/pdf/2409.06857", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Argilla docs: ", "raw": "Argilla docs: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://docs.argilla.io/latest/", "href": "https://docs.argilla.io/latest/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Argilla is also very easy to deploy with Hugging Face Spaces (or locally): ", "raw": "Argilla is also very easy to deploy with Hugging Face Spaces (or locally): ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/new-space?template=argilla%2Fargilla-template-space", "href": "https://huggingface.co/new-space?template=argilla%2Fargilla-template-space", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Why would you fine-tune a model if you can just prompt an LLM? The new paper "What is the Role of Small Models in the LLM Era: A Survey" provides a nice pro/con overview. My go-to approach combines both: 1. Start testing an idea by prompting an LLM/VLM behind an API. It's fast and easy and I avoid wasting time on tuning a model on a task that might not make it into production anyways. 2. The LLM/VLM then needs to be manually validated. Anyone seriously considering putting AI into production has to do at least some manual validation. Setting up a good validation pipeline with a tool like Argilla is crucial and it can be reused for any future experiments. Note: you can use LLM-as-a-judge to automate some evals, but you always also need to validate the judge! 3. Based on this validation I can then (a) either just continue using the prompted LLM if it is accurate enough and it makes sense financially given my load; or (b) if the LLM is not accurate enough or too expensive to run in the long-run, I reuse the existing validation pipeline to annotate some additional data for fine-tuning a smaller model. This can be sped up by reusing & correcting synthetic data from the LLM (or just pure distillation). Paper: https://arxiv.org/pdf/2409.06857 Argilla docs: https://docs.argilla.io/latest/ Argilla is also very easy to deploy with Hugging Face Spaces (or locally): https://huggingface.co/new-space?template=argilla%2Fargilla-template-space
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1613511937628-5fb15d1e84389b139cf3b508.jpeg", "fullname": "Moritz Laurer", "name": "MoritzLaurer", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 236, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/5fb15d1e84389b139cf3b508/jPcKTlp0AFHQh_7IOuNTv.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/5fb15d1e84389b139cf3b508/BFCDK7f4vthqa5NsaPD1g.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/5fb15d1e84389b139cf3b508/Mu8eSc2KkHBriS4vT2lxs.jpeg" } ]
[]
[ { "reaction": "๐Ÿ‘", "users": [ "nicoberk", "Tonic", "John6666", "oliverguhr", "umarbutler" ], "count": 5 }, { "reaction": "๐Ÿ”ฅ", "users": [ "oliverguhr" ], "count": 1 } ]
2024-09-19T08:21:34.000Z
2024-09-19T08:21:34.071Z
[]
/posts/MoritzLaurer/282477100737560
1619
0
273259082873660
[ { "type": "text", "value": "๐Ÿ”ฅ ๐๐ฐ๐ž๐ง ๐ซ๐ž๐ฅ๐ž๐š๐ฌ๐ž๐ฌ ๐ญ๐ก๐ž๐ข๐ซ ๐Ÿ.๐Ÿ“ ๐Ÿ๐š๐ฆ๐ข๐ฅ๐ฒ ๐จ๐Ÿ ๐ฆ๐จ๐๐ž๐ฅ๐ฌ: ๐๐ž๐ฐ ๐’๐Ž๐“๐€ ๐Ÿ๐จ๐ซ ๐š๐ฅ๐ฅ ๐ฌ๐ข๐ณ๐ž๐ฌ ๐ฎ๐ฉ ๐ญ๐จ ๐Ÿ•๐Ÿ๐!", "raw": "๐Ÿ”ฅ ๐๐ฐ๐ž๐ง ๐ซ๐ž๐ฅ๐ž๐š๐ฌ๐ž๐ฌ ๐ญ๐ก๐ž๐ข๐ซ ๐Ÿ.๐Ÿ“ ๐Ÿ๐š๐ฆ๐ข๐ฅ๐ฒ ๐จ๐Ÿ ๐ฆ๐จ๐๐ž๐ฅ๐ฌ: ๐๐ž๐ฐ ๐’๐Ž๐“๐€ ๐Ÿ๐จ๐ซ ๐š๐ฅ๐ฅ ๐ฌ๐ข๐ณ๐ž๐ฌ ๐ฎ๐ฉ ๐ญ๐จ ๐Ÿ•๐Ÿ๐!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The Chinese LLM maker just dropped a flurry of different models, ensuring there will be a Qwen SOTA model for every application out there:", "raw": "The Chinese LLM maker just dropped a flurry of different models, ensuring there will be a Qwen SOTA model for every application out there:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Qwen2.5: 0.5B, 1.5B, 3B, 7B, 14B, 32B, and 72B", "raw": "Qwen2.5: 0.5B, 1.5B, 3B, 7B, 14B, 32B, and 72B", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Qwen2.5-Coder: 1.5B, 7B, and 32B on the way", "raw": "Qwen2.5-Coder: 1.5B, 7B, and 32B on the way", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Qwen2.5-Math: 1.5B, 7B, and 72B.", "raw": "Qwen2.5-Math: 1.5B, 7B, and 72B.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "And they didn't sleep: the performance is top of the game for each weight category!", "raw": "And they didn't sleep: the performance is top of the game for each weight category!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Š๐ž๐ฒ ๐ข๐ง๐ฌ๐ข๐ ๐ก๐ญ๐ฌ:", "raw": "๐Š๐ž๐ฒ 
๐ข๐ง๐ฌ๐ข๐ ๐ก๐ญ๐ฌ:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐ŸŒ All models have ๐Ÿญ๐Ÿฎ๐Ÿด๐—ธ ๐˜๐—ผ๐—ธ๐—ฒ๐—ป ๐—ฐ๐—ผ๐—ป๐˜๐—ฒ๐˜…๐˜ ๐—น๐—ฒ๐—ป๐—ด๐˜๐—ต", "raw": "๐ŸŒ All models have ๐Ÿญ๐Ÿฎ๐Ÿด๐—ธ ๐˜๐—ผ๐—ธ๐—ฒ๐—ป ๐—ฐ๐—ผ๐—ป๐˜๐—ฒ๐˜…๐˜ ๐—น๐—ฒ๐—ป๐—ด๐˜๐—ต", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ“š Models pre-trained on 18T tokens, even longer than the 15T of Llama-3", "raw": "๐Ÿ“š Models pre-trained on 18T tokens, even longer than the 15T of Llama-3", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ’ช The flagship ๐—ค๐˜„๐—ฒ๐—ป๐Ÿฎ.๐Ÿฑ-๐Ÿณ๐Ÿฎ๐—• ๐—ถ๐˜€ ~๐—ฐ๐—ผ๐—บ๐—ฝ๐—ฒ๐˜๐—ถ๐˜๐—ถ๐˜ƒ๐—ฒ ๐˜„๐—ถ๐˜๐—ต ๐—Ÿ๐—น๐—ฎ๐—บ๐—ฎ-๐Ÿฏ.๐Ÿญ-๐Ÿฐ๐Ÿฌ๐Ÿฑ๐—•, ๐—ฎ๐—ป๐—ฑ ๐—ต๐—ฎ๐˜€ ๐—ฎ ๐Ÿฏ-๐Ÿฑ% ๐—บ๐—ฎ๐—ฟ๐—ด๐—ถ๐—ป ๐—ผ๐—ป ๐—Ÿ๐—น๐—ฎ๐—บ๐—ฎ-๐Ÿฏ.๐Ÿญ-๐Ÿณ๐Ÿฌ๐—• ๐—ผ๐—ป ๐—บ๐—ผ๐˜€๐˜ ๐—ฏ๐—ฒ๐—ป๐—ฐ๐—ต๐—บ๐—ฎ๐—ฟ๐—ธ๐˜€.", "raw": "๐Ÿ’ช The flagship ๐—ค๐˜„๐—ฒ๐—ป๐Ÿฎ.๐Ÿฑ-๐Ÿณ๐Ÿฎ๐—• ๐—ถ๐˜€ ~๐—ฐ๐—ผ๐—บ๐—ฝ๐—ฒ๐˜๐—ถ๐˜๐—ถ๐˜ƒ๐—ฒ ๐˜„๐—ถ๐˜๐—ต ๐—Ÿ๐—น๐—ฎ๐—บ๐—ฎ-๐Ÿฏ.๐Ÿญ-๐Ÿฐ๐Ÿฌ๐Ÿฑ๐—•, ๐—ฎ๐—ป๐—ฑ ๐—ต๐—ฎ๐˜€ ๐—ฎ ๐Ÿฏ-๐Ÿฑ% ๐—บ๐—ฎ๐—ฟ๐—ด๐—ถ๐—ป ๐—ผ๐—ป ๐—Ÿ๐—น๐—ฎ๐—บ๐—ฎ-๐Ÿฏ.๐Ÿญ-๐Ÿณ๐Ÿฌ๐—• ๐—ผ๐—ป ๐—บ๐—ผ๐˜€๐˜ ๐—ฏ๐—ฒ๐—ป๐—ฐ๐—ต๐—บ๐—ฎ๐—ฟ๐—ธ๐˜€.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ‡ซ๐Ÿ‡ท On top of this, it ๐˜๐—ฎ๐—ธ๐—ฒ๐˜€ ๐˜๐—ต๐—ฒ #๐Ÿญ ๐˜€๐—ฝ๐—ผ๐˜ ๐—ผ๐—ป ๐—บ๐˜‚๐—น๐˜๐—ถ๐—น๐—ถ๐—ป๐—ด๐˜‚๐—ฎ๐—น ๐˜๐—ฎ๐˜€๐—ธ๐˜€ so it might become my standard for French", "raw": "๐Ÿ‡ซ๐Ÿ‡ท On top of this, it ๐˜๐—ฎ๐—ธ๐—ฒ๐˜€ ๐˜๐—ต๐—ฒ #๐Ÿญ ๐˜€๐—ฝ๐—ผ๐˜ ๐—ผ๐—ป ๐—บ๐˜‚๐—น๐˜๐—ถ๐—น๐—ถ๐—ป๐—ด๐˜‚๐—ฎ๐—น ๐˜๐—ฎ๐˜€๐—ธ๐˜€ so it might become my standard for French", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, 
"url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ’ป Qwen2.5-Coder is only 7B but beats competing models up to 33B (DeeSeek-Coder 33B-Instruct). Let's wait for their 32B to come out!", "raw": "๐Ÿ’ป Qwen2.5-Coder is only 7B but beats competing models up to 33B (DeeSeek-Coder 33B-Instruct). Let's wait for their 32B to come out!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿงฎ Qwen2.5-Math sets a new high in the ratio of MATH benchmark score to # of parameters. They trained it by \"aggregating more high-quality mathematical data, particularly in Chinese, from web sources, books, and codes across multiple recall cycles.\"", "raw": "๐Ÿงฎ Qwen2.5-Math sets a new high in the ratio of MATH benchmark score to # of parameters. They trained it by \"aggregating more high-quality mathematical data, particularly in Chinese, from web sources, books, and codes across multiple recall cycles.\"", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ“„ Technical report to be released \"very soon\"", "raw": "๐Ÿ“„ Technical report to be released \"very soon\"", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ”“ All models have the most permissive license apache2.0, except the 72B models that have a custom license mentioning \"you can use it for free EXCEPT if your product has over 100M users\"", "raw": "๐Ÿ”“ All models have the most permissive license apache2.0, except the 72B models that have a custom license mentioning \"you can use it for free EXCEPT if your product has over 100M users\"", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿค— All models are available on the HF Hub! โžก๏ธ ", "raw": "๐Ÿค— All models are available on the HF Hub! 
โžก๏ธ ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/Qwen/qwen25-66e81a666513e518adb90d9e", "href": null, "resource": { "type": "collection", "id": "Qwen/qwen25-66e81a666513e518adb90d9e", "discussionNum": null }, "url": "https://huggingface.co/collections/Qwen/qwen25-66e81a666513e518adb90d9e", "code": null, "user": null, "label": null, "lang": null } ]
๐Ÿ”ฅ ๐๐ฐ๐ž๐ง ๐ซ๐ž๐ฅ๐ž๐š๐ฌ๐ž๐ฌ ๐ญ๐ก๐ž๐ข๐ซ ๐Ÿ.๐Ÿ“ ๐Ÿ๐š๐ฆ๐ข๐ฅ๐ฒ ๐จ๐Ÿ ๐ฆ๐จ๐๐ž๐ฅ๐ฌ: ๐๐ž๐ฐ ๐’๐Ž๐“๐€ ๐Ÿ๐จ๐ซ ๐š๐ฅ๐ฅ ๐ฌ๐ข๐ณ๐ž๐ฌ ๐ฎ๐ฉ ๐ญ๐จ ๐Ÿ•๐Ÿ๐! The Chinese LLM maker just dropped a flurry of different models, ensuring there will be a Qwen SOTA model for every application out there: Qwen2.5: 0.5B, 1.5B, 3B, 7B, 14B, 32B, and 72B Qwen2.5-Coder: 1.5B, 7B, and 32B on the way Qwen2.5-Math: 1.5B, 7B, and 72B. And they didn't sleep: the performance is top of the game for each weight category! ๐Š๐ž๐ฒ ๐ข๐ง๐ฌ๐ข๐ ๐ก๐ญ๐ฌ: ๐ŸŒ All models have ๐Ÿญ๐Ÿฎ๐Ÿด๐—ธ ๐˜๐—ผ๐—ธ๐—ฒ๐—ป ๐—ฐ๐—ผ๐—ป๐˜๐—ฒ๐˜…๐˜ ๐—น๐—ฒ๐—ป๐—ด๐˜๐—ต ๐Ÿ“š Models pre-trained on 18T tokens, even longer than the 15T of Llama-3 ๐Ÿ’ช The flagship ๐—ค๐˜„๐—ฒ๐—ป๐Ÿฎ.๐Ÿฑ-๐Ÿณ๐Ÿฎ๐—• ๐—ถ๐˜€ ~๐—ฐ๐—ผ๐—บ๐—ฝ๐—ฒ๐˜๐—ถ๐˜๐—ถ๐˜ƒ๐—ฒ ๐˜„๐—ถ๐˜๐—ต ๐—Ÿ๐—น๐—ฎ๐—บ๐—ฎ-๐Ÿฏ.๐Ÿญ-๐Ÿฐ๐Ÿฌ๐Ÿฑ๐—•, ๐—ฎ๐—ป๐—ฑ ๐—ต๐—ฎ๐˜€ ๐—ฎ ๐Ÿฏ-๐Ÿฑ% ๐—บ๐—ฎ๐—ฟ๐—ด๐—ถ๐—ป ๐—ผ๐—ป ๐—Ÿ๐—น๐—ฎ๐—บ๐—ฎ-๐Ÿฏ.๐Ÿญ-๐Ÿณ๐Ÿฌ๐—• ๐—ผ๐—ป ๐—บ๐—ผ๐˜€๐˜ ๐—ฏ๐—ฒ๐—ป๐—ฐ๐—ต๐—บ๐—ฎ๐—ฟ๐—ธ๐˜€. ๐Ÿ‡ซ๐Ÿ‡ท On top of this, it ๐˜๐—ฎ๐—ธ๐—ฒ๐˜€ ๐˜๐—ต๐—ฒ #๐Ÿญ ๐˜€๐—ฝ๐—ผ๐˜ ๐—ผ๐—ป ๐—บ๐˜‚๐—น๐˜๐—ถ๐—น๐—ถ๐—ป๐—ด๐˜‚๐—ฎ๐—น ๐˜๐—ฎ๐˜€๐—ธ๐˜€ so it might become my standard for French ๐Ÿ’ป Qwen2.5-Coder is only 7B but beats competing models up to 33B (DeeSeek-Coder 33B-Instruct). Let's wait for their 32B to come out! ๐Ÿงฎ Qwen2.5-Math sets a new high in the ratio of MATH benchmark score to # of parameters. They trained it by "aggregating more high-quality mathematical data, particularly in Chinese, from web sources, books, and codes across multiple recall cycles." ๐Ÿ“„ Technical report to be released "very soon" ๐Ÿ”“ All models have the most permissive license apache2.0, except the 72B models that have a custom license mentioning "you can use it for free EXCEPT if your product has over 100M users" ๐Ÿค— All models are available on the HF Hub! โžก๏ธ https://huggingface.co/collections/Qwen/qwen25-66e81a666513e518adb90d9e
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63d10d4e8eaa4831005e92b5/7p7-OmWM6PqqCs7ZStPGD.jpeg", "fullname": "Aymeric Roucher", "name": "m-ric", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 494, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/63d10d4e8eaa4831005e92b5/Z5omvN5jP3jDfnXTxVPY2.png" } ]
[]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "alielfilali01", "ajibawa-2023", "louisbrulenaudet", "codito", "yeonseok-zeticai", "KingNish", "John6666", "Hassanain0108", "Wok", "santiagokomadina", "DataSoul", "v000000", "jeffboudier", "snoopynoob", "adibvafa", "DiamanteAmarelo", "nbroad", "mihaylovnikitos", "osanseviero" ], "count": 19 }, { "reaction": "โค๏ธ", "users": [ "John6666", "DiamanteAmarelo", "Svngoku", "trollek", "osanseviero" ], "count": 5 }, { "reaction": "๐Ÿš€", "users": [ "John6666", "DiamanteAmarelo", "osanseviero" ], "count": 3 } ]
2024-09-19T06:20:20.000Z
2024-09-19T14:15:29.950Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a3bb1cd0d8c2c2169f0b88/eT2TS0IlQbZtz-F_zHLz9.jpeg", "fullname": "Joseph [open/acc] Pollack", "name": "Tonic", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 313, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6640bbd0220cfa8cbfdce080/wiAHUu5ewawyipNs0YFBR.png", "fullname": "John Smith", "name": "John6666", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 398, "isFollowing": false } ]
/posts/m-ric/273259082873660
3372
2